Sep 29 13:41:12 crc systemd[1]: Starting Kubernetes Kubelet...
Sep 29 13:41:12 crc restorecon[4724]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Sep 29 13:41:12 crc restorecon[4724]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc 
restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Sep 29 13:41:12 crc 
restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc 
restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc 
restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 
crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 
13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 
13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Sep 29 13:41:12 crc 
restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Sep 29 13:41:12 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Sep 29 13:41:12 crc restorecon[4724]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 
13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Sep 29 13:41:13 crc restorecon[4724]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Sep 29 13:41:13 crc kubenswrapper[4869]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.992269 4869 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996731 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996788 4869 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996795 4869 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996801 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996807 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996815 4869 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996821 4869 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996827 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996831 4869 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996836 4869 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996840 4869 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996845 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996850 4869 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996859 4869 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996864 4869 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996870 4869 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996874 4869 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996879 4869 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996884 4869 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996889 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996894 4869 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996899 4869 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996904 4869 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996909 4869 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996913 4869 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996918 4869 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996922 4869 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996927 4869 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996931 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996935 4869 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996940 4869 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996945 4869 feature_gate.go:330] unrecognized feature gate: Example
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996949 4869 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996953 4869 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996958 4869 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996963 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996967 4869 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996972 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996979 4869 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996985 4869 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996990 4869 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.996995 4869 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997002 4869 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
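[Editor's note] The long runs of "unrecognized feature gate" warnings above and below are benign: the OpenShift wrapper hands the kubelet the cluster-wide gate list, and the upstream kubelet only warns about names it does not know, then keeps starting. A minimal, hypothetical Go sketch of that warn-and-skip behavior (not the kubelet's actual feature_gate.go code; the "known" set and the apply helper are illustrative only):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // known mimics the gates an upstream component recognizes; anything absent
    // is only warned about, never fatal -- matching the W-level entries here.
    var known = map[string]bool{
        "CloudDualStackNodeIPs":                  true,
        "DisableKubeletCloudCredentialProviders": true,
        "KMSv1":                                  true,
        "ValidatingAdmissionPolicy":              true,
    }

    // apply parses a "Name=bool,Name=bool" spec into a gate map, warning on
    // unrecognized names (hypothetical helper, for illustration only).
    func apply(spec string) map[string]bool {
        gates := map[string]bool{}
        for _, kv := range strings.Split(spec, ",") {
            name, val, ok := strings.Cut(kv, "=")
            if !ok {
                continue
            }
            if !known[name] {
                fmt.Printf("W: unrecognized feature gate: %s\n", name)
                continue
            }
            if b, err := strconv.ParseBool(val); err == nil {
                gates[name] = b
            }
        }
        return gates
    }

    func main() {
        // GatewayAPI stands in for a cluster-side gate, so it is skipped with a warning.
        fmt.Println(apply("CloudDualStackNodeIPs=true,KMSv1=true,GatewayAPI=true"))
    }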
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997008 4869 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997014 4869 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997020 4869 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997024 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997030 4869 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997035 4869 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997040 4869 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997044 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997048 4869 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997054 4869 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997060 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997065 4869 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997070 4869 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997075 4869 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997079 4869 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997084 4869 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997088 4869 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997092 4869 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997098 4869 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997102 4869 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997108 4869 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997112 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997116 4869 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997122 4869 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997128 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997134 4869 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997138 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 29 13:41:13 crc kubenswrapper[4869]: W0929 13:41:13.997143 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998609 4869 flags.go:64] FLAG: --address="0.0.0.0"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998702 4869 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998730 4869 flags.go:64] FLAG: --anonymous-auth="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998740 4869 flags.go:64] FLAG: --application-metrics-count-limit="100"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998754 4869 flags.go:64] FLAG: --authentication-token-webhook="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998762 4869 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998782 4869 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998794 4869 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998804 4869 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998811 4869 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998818 4869 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998824 4869 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998830 4869 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998845 4869 flags.go:64] FLAG: --cgroup-root=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998851 4869 flags.go:64] FLAG: --cgroups-per-qos="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998856 4869 flags.go:64] FLAG: --client-ca-file=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998862 4869 flags.go:64] FLAG: --cloud-config=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998868 4869 flags.go:64] FLAG: --cloud-provider=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998873 4869 flags.go:64] FLAG: --cluster-dns="[]"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998881 4869 flags.go:64] FLAG: --cluster-domain=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998886 4869 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998892 4869 flags.go:64] FLAG: --config-dir=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998898 4869 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998903 4869 flags.go:64] FLAG: --container-log-max-files="5"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998912 4869 flags.go:64] FLAG: --container-log-max-size="10Mi"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998918 4869 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998923 4869 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998930 4869 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998936 4869 flags.go:64] FLAG: --contention-profiling="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998943 4869 flags.go:64] FLAG: --cpu-cfs-quota="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998948 4869 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998957 4869 flags.go:64] FLAG: --cpu-manager-policy="none"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998962 4869 flags.go:64] FLAG: --cpu-manager-policy-options=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998972 4869 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998978 4869 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998985 4869 flags.go:64] FLAG: --enable-debugging-handlers="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.998992 4869 flags.go:64] FLAG: --enable-load-reader="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999001 4869 flags.go:64] FLAG: --enable-server="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999008 4869 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999020 4869 flags.go:64] FLAG: --event-burst="100"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999033 4869 flags.go:64] FLAG: --event-qps="50"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999040 4869 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999047 4869 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999057 4869 flags.go:64] FLAG: --eviction-hard=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999066 4869 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999073 4869 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999079 4869 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999085 4869 flags.go:64] FLAG: --eviction-soft=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999090 4869 flags.go:64] FLAG: --eviction-soft-grace-period=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999097 4869 flags.go:64] FLAG: --exit-on-lock-contention="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999103 4869 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999108 4869 flags.go:64] FLAG: --experimental-mounter-path=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999114 4869 flags.go:64] FLAG: --fail-cgroupv1="false"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999119 4869 flags.go:64] FLAG: --fail-swap-on="true"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999126 4869 flags.go:64] FLAG: --feature-gates=""
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999134 4869 flags.go:64] FLAG: --file-check-frequency="20s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999140 4869 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Sep 29 13:41:13 crc kubenswrapper[4869]: I0929 13:41:13.999146 4869 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999153 4869 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999159 4869 flags.go:64] FLAG: --healthz-port="10248"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999165 4869 flags.go:64] FLAG: --help="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999171 4869 flags.go:64] FLAG: --hostname-override=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999177 4869 flags.go:64] FLAG: --housekeeping-interval="10s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999183 4869 flags.go:64] FLAG: --http-check-frequency="20s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999189 4869 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999195 4869 flags.go:64] FLAG: --image-credential-provider-config=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999202 4869 flags.go:64] FLAG: --image-gc-high-threshold="85"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999208 4869 flags.go:64] FLAG: --image-gc-low-threshold="80"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999214 4869 flags.go:64] FLAG: --image-service-endpoint=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999219 4869 flags.go:64] FLAG: --kernel-memcg-notification="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999226 4869 flags.go:64] FLAG: --kube-api-burst="100"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999233 4869 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999239 4869 flags.go:64] FLAG: --kube-api-qps="50"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999245 4869 flags.go:64] FLAG: --kube-reserved=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999250 4869 flags.go:64] FLAG: --kube-reserved-cgroup=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999255 4869 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999262 4869 flags.go:64] FLAG: --kubelet-cgroups=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999267 4869 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999274 4869 flags.go:64] FLAG: --lock-file=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999279 4869 flags.go:64] FLAG: --log-cadvisor-usage="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999285 4869 flags.go:64] FLAG: --log-flush-frequency="5s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999291 4869 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999303 4869 flags.go:64] FLAG: --log-json-split-stream="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999309 4869 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999314 4869 flags.go:64] FLAG: --log-text-split-stream="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999320 4869 flags.go:64] FLAG: --logging-format="text"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999325 4869 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999331 4869 flags.go:64] FLAG: --make-iptables-util-chains="true"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999336 4869 flags.go:64] FLAG: --manifest-url=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999342 4869 flags.go:64] FLAG: --manifest-url-header=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999352 4869 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999358 4869 flags.go:64] FLAG: --max-open-files="1000000"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999365 4869 flags.go:64] FLAG: --max-pods="110"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999370 4869 flags.go:64] FLAG: --maximum-dead-containers="-1"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999375 4869 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999380 4869 flags.go:64] FLAG: --memory-manager-policy="None"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999385 4869 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999390 4869 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999399 4869 flags.go:64] FLAG: --node-ip="192.168.126.11"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999405 4869 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999427 4869 flags.go:64] FLAG: --node-status-max-images="50"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999433 4869 flags.go:64] FLAG: --node-status-update-frequency="10s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999439 4869 flags.go:64] FLAG: --oom-score-adj="-999"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999444 4869 flags.go:64] FLAG: --pod-cidr=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999450 4869 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999465 4869 flags.go:64] FLAG: --pod-manifest-path=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999471 4869 flags.go:64] FLAG: --pod-max-pids="-1"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999477 4869 flags.go:64] FLAG: --pods-per-core="0"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999483 4869 flags.go:64] FLAG: --port="10250"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999489 4869 flags.go:64] FLAG: --protect-kernel-defaults="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999496 4869 flags.go:64] FLAG: --provider-id=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999502 4869 flags.go:64] FLAG: --qos-reserved=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999509 4869 flags.go:64] FLAG: --read-only-port="10255"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999515 4869 flags.go:64] FLAG: --register-node="true"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999523 4869 flags.go:64] FLAG: --register-schedulable="true"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999530 4869 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999544 4869 flags.go:64] FLAG: --registry-burst="10"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999549 4869 flags.go:64] FLAG: --registry-qps="5"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999556 4869 flags.go:64] FLAG: --reserved-cpus=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999562 4869 flags.go:64] FLAG: --reserved-memory=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999570 4869 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999577 4869 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999584 4869 flags.go:64] FLAG: --rotate-certificates="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999589 4869 flags.go:64] FLAG: --rotate-server-certificates="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999594 4869 flags.go:64] FLAG: --runonce="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999599 4869 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999605 4869 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999632 4869 flags.go:64] FLAG: --seccomp-default="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999639 4869 flags.go:64] FLAG: --serialize-image-pulls="true"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999645 4869 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999652 4869 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999659 4869 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999667 4869 flags.go:64] FLAG: --storage-driver-password="root"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999672 4869 flags.go:64] FLAG: --storage-driver-secure="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999677 4869 flags.go:64] FLAG: --storage-driver-table="stats"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999682 4869 flags.go:64] FLAG: --storage-driver-user="root"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999688 4869 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999693 4869 flags.go:64] FLAG: --sync-frequency="1m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999698 4869 flags.go:64] FLAG: --system-cgroups=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999703 4869 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999712 4869 flags.go:64] FLAG: --system-reserved-cgroup=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999718 4869 flags.go:64] FLAG: --tls-cert-file=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999725 4869 flags.go:64] FLAG: --tls-cipher-suites="[]"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999732 4869 flags.go:64] FLAG: --tls-min-version=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999738 4869 flags.go:64] FLAG: --tls-private-key-file=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999743 4869 flags.go:64] FLAG: --topology-manager-policy="none"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999749 4869 flags.go:64] FLAG: --topology-manager-policy-options=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999754 4869 flags.go:64] FLAG: --topology-manager-scope="container"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999761 4869 flags.go:64] FLAG: --v="2"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999770 4869 flags.go:64] FLAG: --version="false"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999778 4869 flags.go:64] FLAG: --vmodule=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999786 4869 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:13.999791 4869 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:13.999990 4869 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000000 4869 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000008 4869 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000014 4869 feature_gate.go:330] unrecognized feature gate: Example
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000019 4869 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000025 4869 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000030 4869 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000035 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000040 4869 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000045 4869 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000051 4869 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000056 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000061 4869 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000066 4869 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000072 4869 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000077 4869 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000083 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000088 4869 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000094 4869 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000099 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000105 4869 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000111 4869 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000117 4869 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000123 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000128 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000134 4869 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000140 4869 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000146 4869 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000151 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000156 4869 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000161 4869 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000180 4869 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000185 4869 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000190 4869 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000195 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000199 4869 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000203 4869 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000208 4869 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000213 4869 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000217 4869 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000221 4869 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000225 4869 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000230 4869 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000235 4869 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000240 4869 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000244 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000251 4869 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000256 4869 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000260 4869 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000265 4869 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000270 4869 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000274 4869 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000279 4869 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000284 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000289 4869 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000293 4869 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000297 4869 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000301 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000306 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000310 4869 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000315 4869 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000320 4869 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000325 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000329 4869 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000335 4869 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000341 4869 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000347 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000362 4869 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000368 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000374 4869 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.000379 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.000399 4869 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.012330 4869 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.012388 4869 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012493 4869 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012509 4869 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012516 4869 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012522 4869 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012528 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012536 4869 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012541 4869 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012547 4869 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012552 4869 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012558 4869 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012563 4869 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012568 4869 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012574 4869 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012579 4869 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012584 4869 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012589 4869 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012595 4869 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012600 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012611 4869 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012632 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012638 4869 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012643 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012649 4869 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012655 4869 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012660 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012665 4869 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012671 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012676 4869 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012681 4869 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012687 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012693 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012699 4869 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012704 4869 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012710 4869 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012718 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012724 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012729 4869 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012735 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012741 4869 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012746 4869 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012753 4869 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012761 4869 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012766 4869 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012772 4869 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012779 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012785 4869 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012790 4869 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012795 4869 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012800 4869 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012806 4869 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012811 4869 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012816 4869 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012823 4869 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012829 4869 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012834 4869 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012840 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012845 4869 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012851 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012858 4869 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012866 4869 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012873 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012880 4869 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012885 4869 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012891 4869 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012896 4869 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012902 4869 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012907 4869 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012912 4869 feature_gate.go:330] unrecognized feature gate: Example
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012917 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012923 4869 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.012929 4869 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.012940 4869 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013107 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013117 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013124 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013130 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013136 4869 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013141 4869 feature_gate.go:330] unrecognized feature gate: Example
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013147 4869 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013152 4869 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013159 4869 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013164 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013169 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013174 4869 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013181 4869 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013189 4869 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013194 4869 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013199 4869 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013205 4869 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013210 4869 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013216 4869 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013221 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013226 4869 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013231 4869 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013236 4869 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013242 4869 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013247 4869 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013255 4869 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013262 4869 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013269 4869 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013276 4869 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013283 4869 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013289 4869 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013295 4869 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013300 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013306 4869 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013313 4869 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013318 4869 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013323 4869 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013328 4869 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013334 4869 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013339 4869 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013344 4869 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013349 4869 feature_gate.go:330] unrecognized feature gate: PinnedImages
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013354 4869 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013359 4869 feature_gate.go:330] unrecognized feature gate: OVNObservability
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013364 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013372 4869 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013377 4869 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013382 4869 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013387 4869 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013392 4869 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013398 4869 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013403 4869 feature_gate.go:330] unrecognized feature gate: SignatureStores
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013408 4869 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013413 4869 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013418 4869 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013424 4869 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013429 4869 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013434 4869 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013439 4869 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013444 4869 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013449 4869 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013454 4869 feature_gate.go:330] unrecognized feature gate: NewOLM
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013459 4869 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013465 4869 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013470 4869 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013475 4869 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013480 4869 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013486 4869 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013492 4869 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013499 4869 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.013506 4869 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.013516 4869 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.013848 4869 server.go:940] "Client rotation is on, will bootstrap in background"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.019334 4869 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.019452 4869 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
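[Editor's note] The rotation entries just below report a deadline roughly 86% of the way through the client certificate's validity window, with the kubelet then sleeping until that time. A hedged Go sketch of that computation, assuming the upstream client-go behavior of picking a jittered deadline between 70% and 90% of the validity window; the notBefore value is an assumed one-year-earlier issue time, since only the expiration appears in the log:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline places the next rotation at a random point in [70%, 90%]
    // of the certificate's validity (assumption: mirrors client-go's jitter rule).
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        return notBefore.Add(time.Duration(float64(total) * (0.7 + 0.2*rand.Float64())))
    }

    func main() {
        // Assumed issue time; only the expiration below is taken from the log.
        notBefore := time.Date(2025, 2, 24, 5, 52, 8, 0, time.UTC)
        notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
        d := rotationDeadline(notBefore, notAfter)
        // Output shape matches the log: "Waiting <duration> for next certificate rotation".
        fmt.Printf("rotation deadline %s, waiting %s\n", d, time.Until(d).Round(time.Second))
    }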
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.021844 4869 server.go:997] "Starting client certificate rotation"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.021893 4869 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.023021 4869 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-04 07:01:45.341648298 +0000 UTC
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.023117 4869 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 2321h20m31.318535319s for next certificate rotation
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.053504 4869 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.056793 4869 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.074700 4869 log.go:25] "Validated CRI v1 runtime API"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.115134 4869 log.go:25] "Validated CRI v1 image API"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.117334 4869 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.125429 4869 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-09-29-11-07-00-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.125475 4869 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.145751 4869 manager.go:217] Machine: {Timestamp:2025-09-29 13:41:14.141241931 +0000 UTC m=+0.581886271 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:5122158e-3b60-4d27-a340-00c79e99c195 BootID:2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37 Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:fe:fb:96 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:fe:fb:96 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:fc:83:9d Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:97:39:4d Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:4c:4c:95 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:99:c9:b8 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:f3:7f:2a Speed:-1 Mtu:1496} {Name:eth10 MacAddress:b6:71:e8:50:db:27 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:62:28:9a:b0:76:04 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.146105 4869 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.146362 4869 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.147081 4869 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.147306 4869 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.147353 4869 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.147750 4869 topology_manager.go:138] "Creating topology manager with none policy"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.147761 4869 container_manager_linux.go:303] "Creating device plugin manager"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.148232 4869 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.148267 4869 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.148991 4869 state_mem.go:36] "Initialized new in-memory state store"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.149087 4869 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.154068 4869 kubelet.go:418] "Attempting to sync node with API server"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.154092 4869 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.154121 4869 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.154140 4869 kubelet.go:324] "Adding apiserver pod source"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.154154 4869 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.160934 4869 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.162717 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.162831 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.162990 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.163034 4869 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.163002 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.166574 4869 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168466 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168492 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168500 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168507 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168518 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168527 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168537 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168547 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168557 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168566 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168579 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.168585 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.170584 4869 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.171327 4869 server.go:1280] "Started kubelet"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.176079 4869 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.176711 4869 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.177988 4869 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Sep 29 13:41:14 crc systemd[1]: Started Kubernetes Kubelet.
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.179865 4869 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186316 4869 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186751 4869 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186970 4869 volume_manager.go:287] "The desired_state_of_world populator starts"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186994 4869 volume_manager.go:289] "Starting Kubelet Volume Manager"
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.186995 4869 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186837 4869 server.go:460] "Adding debug handlers to kubelet server"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.187149 4869 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.186821 4869 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 10:32:28.746294894 +0000 UTC
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.187320 4869 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 2132h51m14.558983202s for next certificate rotation
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.186360 4869 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1869c4951c71f40b default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-09-29 13:41:14.171290635 +0000 UTC m=+0.611934945,LastTimestamp:2025-09-29 13:41:14.171290635 +0000 UTC m=+0.611934945,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.188031 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="200ms"
Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.188074 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.188183 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193069 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193126 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193139 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193147 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193158 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193166 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193174 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193182 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193192 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193204 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193215 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193224 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193233 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193242 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193250 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193267 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193279 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193288 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193296 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193305 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193314 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193326 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193342 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193351 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193360 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193368 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193379 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193388 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193401 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193410 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193418 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193427 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193440 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193464 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193472 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193482 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193502 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193510 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193520 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193528 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193537 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193545 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193554 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193562 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193569 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193578 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193586 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193594 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193602 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193613 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193632 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193640 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193683 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193693 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193705 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193714 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193722 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193731 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193740 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193750 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193759 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193769 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193777 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193785 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193794 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193801 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193811 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193820 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193829 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193836 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193845 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193855 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193863 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193871 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193880 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193888 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193896 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193905 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193917 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193928 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193937 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193948 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193956 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193965 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193974 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193982 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193991 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.193999 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194008 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194017 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194026 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194034 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194043 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194052 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194063 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194071 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194080 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194089 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194097 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194106 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194114 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194124 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194134 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194142 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194155 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194165 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194174 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194184 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194195 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194205 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194218 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194227 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194237 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194245 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194254 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194263 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194271 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194281 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194290 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194300 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194309 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194318 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194326 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194336 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194347 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194356 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194365 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194375 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194384 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194394 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194404 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194413 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194424 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194434 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194445 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194454 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194463 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194467 4869 factory.go:153] Registering CRI-O factory
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194472 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194682 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194709 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194718 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194727 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194738 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194747 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual
state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194758 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194768 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194779 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194796 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194806 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194815 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194827 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194837 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194846 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194856 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194866 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194876 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194886 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194895 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194904 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194913 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194922 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194931 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194939 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194947 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194956 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194965 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194973 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194982 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194990 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.194998 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195007 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195016 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195025 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195034 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195044 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195053 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195062 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195071 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195079 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195088 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195096 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195105 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195113 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195122 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195132 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195141 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195150 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195158 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195166 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195174 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195182 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195190 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195199 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195208 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195218 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195226 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195235 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195243 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195252 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195262 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195272 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195282 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195291 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195301 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195311 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195319 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.195328 4869 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196078 4869 factory.go:221] Registration of the crio container factory successfully Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196150 4869 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196161 4869 factory.go:55] Registering systemd factory Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196167 4869 factory.go:221] Registration of the systemd container factory successfully Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196189 4869 
factory.go:103] Registering Raw factory Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196201 4869 manager.go:1196] Started watching for new ooms in manager Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196723 4869 manager.go:319] Starting recovery of all containers Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196762 4869 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196782 4869 reconstruct.go:97] "Volume reconstruction finished" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.196789 4869 reconciler.go:26] "Reconciler: start to sync state" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.218995 4869 manager.go:324] Recovery completed Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.230639 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.233151 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.233444 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.233534 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.235153 4869 cpu_manager.go:225] "Starting CPU manager" policy="none" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.235185 4869 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.235208 4869 state_mem.go:36] "Initialized new in-memory state store" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.236076 4869 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.240440 4869 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.240501 4869 status_manager.go:217] "Starting to sync pod status with apiserver" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.240545 4869 kubelet.go:2335] "Starting kubelet main sync loop" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.240600 4869 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.241236 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.241311 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.249248 4869 policy_none.go:49] "None policy: Start" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.250324 4869 memory_manager.go:170] "Starting memorymanager" policy="None" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.250366 4869 state_mem.go:35] "Initializing new in-memory state store" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.287137 4869 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.308481 4869 manager.go:334] "Starting Device Plugin manager" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.308565 4869 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.308589 4869 server.go:79] "Starting device plugin registration server" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.309397 4869 eviction_manager.go:189] "Eviction manager: starting control loop" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.309434 4869 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.309889 4869 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.310067 4869 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.310081 4869 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.318033 4869 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.341310 4869 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Sep 29 13:41:14 crc kubenswrapper[4869]: 
I0929 13:41:14.341451 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.342721 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.342773 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.342786 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.342947 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.343183 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.343248 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344086 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344120 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344132 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344287 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344403 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344435 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344578 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344606 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344638 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344916 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344937 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.344947 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345022 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345028 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345047 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345355 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.345395 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346098 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346126 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346140 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346265 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346282 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346293 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346420 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346664 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.346698 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347068 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347078 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347231 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347259 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347291 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347305 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.347314 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.348051 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.348077 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.348089 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.389575 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="400ms" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398010 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398065 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398106 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398135 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398265 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 
13:41:14.398327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398363 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398445 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398479 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398505 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398528 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398553 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398581 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398636 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.398663 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.409886 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.411648 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.411700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.411712 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.411748 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.412236 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500145 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500171 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500199 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500216 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500236 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500260 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500278 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500283 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500297 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500319 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500411 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500422 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500422 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500480 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: 
\"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500463 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500469 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500575 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500581 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500624 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500618 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500666 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500683 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500670 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500647 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500659 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500688 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.500746 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.501284 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.613411 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.615419 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.615461 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.615476 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.615507 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.616267 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.664460 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.672725 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.678721 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.696785 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: I0929 13:41:14.702299 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.725006 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-e4e28f1f6bdabaa3936c1b101054046d1f61e620b9bb277d377e2ab907d6e470 WatchSource:0}: Error finding container e4e28f1f6bdabaa3936c1b101054046d1f61e620b9bb277d377e2ab907d6e470: Status 404 returned error can't find the container with id e4e28f1f6bdabaa3936c1b101054046d1f61e620b9bb277d377e2ab907d6e470 Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.726030 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-238ca7d494772600c9c44c6b3cc8a70b58b699b6a3917ad5267aafaa65fd5124 WatchSource:0}: Error finding container 238ca7d494772600c9c44c6b3cc8a70b58b699b6a3917ad5267aafaa65fd5124: Status 404 returned error can't find the container with id 238ca7d494772600c9c44c6b3cc8a70b58b699b6a3917ad5267aafaa65fd5124 Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.730991 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-fb908e9d828ad472e06e88be8e44f4fc405f7145b72b1a5377dbff739292e5b8 WatchSource:0}: Error finding container fb908e9d828ad472e06e88be8e44f4fc405f7145b72b1a5377dbff739292e5b8: Status 404 returned error can't find the container with id fb908e9d828ad472e06e88be8e44f4fc405f7145b72b1a5377dbff739292e5b8 Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.733427 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-f0e802e651f62314cadcf7a1e9cfc563e7beb2e2beb587c5f44a70de19f4affb WatchSource:0}: Error finding container f0e802e651f62314cadcf7a1e9cfc563e7beb2e2beb587c5f44a70de19f4affb: Status 404 returned error can't find the container with id f0e802e651f62314cadcf7a1e9cfc563e7beb2e2beb587c5f44a70de19f4affb Sep 29 13:41:14 crc kubenswrapper[4869]: W0929 13:41:14.737534 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-3dd9af8c4bcc833bb6c0913cc05fcf9588c3f7f62dbde3b33f63580c69f7dcc3 WatchSource:0}: Error finding container 3dd9af8c4bcc833bb6c0913cc05fcf9588c3f7f62dbde3b33f63580c69f7dcc3: Status 404 returned error can't find the container with id 3dd9af8c4bcc833bb6c0913cc05fcf9588c3f7f62dbde3b33f63580c69f7dcc3 Sep 29 13:41:14 crc kubenswrapper[4869]: E0929 13:41:14.790817 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="800ms" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.016409 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.018206 4869 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.018264 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.018277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.018319 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.019017 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Sep 29 13:41:15 crc kubenswrapper[4869]: W0929 13:41:15.124962 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.125063 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.179592 4869 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.252290 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fb908e9d828ad472e06e88be8e44f4fc405f7145b72b1a5377dbff739292e5b8"} Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.253993 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"238ca7d494772600c9c44c6b3cc8a70b58b699b6a3917ad5267aafaa65fd5124"} Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.256985 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e4e28f1f6bdabaa3936c1b101054046d1f61e620b9bb277d377e2ab907d6e470"} Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.258349 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3dd9af8c4bcc833bb6c0913cc05fcf9588c3f7f62dbde3b33f63580c69f7dcc3"} Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.259847 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"f0e802e651f62314cadcf7a1e9cfc563e7beb2e2beb587c5f44a70de19f4affb"} Sep 29 13:41:15 crc kubenswrapper[4869]: W0929 13:41:15.472364 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: 
failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.472842 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.592324 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="1.6s" Sep 29 13:41:15 crc kubenswrapper[4869]: W0929 13:41:15.650475 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.650576 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:15 crc kubenswrapper[4869]: W0929 13:41:15.693456 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.693568 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.819731 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.821925 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.822024 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.822053 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:15 crc kubenswrapper[4869]: I0929 13:41:15.822115 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:15 crc kubenswrapper[4869]: E0929 13:41:15.823057 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.179458 4869 
csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.264959 4869 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283" exitCode=0 Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.265054 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.265089 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.266133 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.266207 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.266230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.267598 4869 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a" exitCode=0 Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.267706 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.268062 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.269939 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.269972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.269989 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.270910 4869 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0973de5e1e219471421ec9fc4c2b8d8b169f1b03551f41d95dbd76c68ec93195" exitCode=0 Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.271016 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0973de5e1e219471421ec9fc4c2b8d8b169f1b03551f41d95dbd76c68ec93195"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.271142 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.273373 4869 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.273397 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.273412 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.277215 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.277799 4869 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff" exitCode=0 Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.277890 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.277923 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.279541 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.279661 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.279680 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.280059 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.280230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.280261 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.284321 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.284360 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.284372 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.284382 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c"} Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.284452 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.285310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.285334 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:16 crc kubenswrapper[4869]: I0929 13:41:16.285344 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.179165 4869 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:17 crc kubenswrapper[4869]: E0929 13:41:17.194108 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="3.2s" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.290193 4869 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="316c9d07fbe9596f75247d51cc9c81621e95a77e28496ead75b283cd0912e68c" exitCode=0 Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.290263 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"316c9d07fbe9596f75247d51cc9c81621e95a77e28496ead75b283cd0912e68c"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.290431 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.294324 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.294431 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.294447 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.300212 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.300334 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.301929 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.301990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:17 crc 
kubenswrapper[4869]: I0929 13:41:17.302003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.303115 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.303153 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.303167 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.303259 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.304387 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.304432 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.304446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.309124 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.309319 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.309378 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.309394 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.309406 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c"} Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.310402 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.310436 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.310469 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: E0929 13:41:17.337697 4869 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1869c4951c71f40b default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-09-29 13:41:14.171290635 +0000 UTC m=+0.611934945,LastTimestamp:2025-09-29 13:41:14.171290635 +0000 UTC m=+0.611934945,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.424821 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.437881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.437949 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.437965 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:17 crc kubenswrapper[4869]: I0929 13:41:17.438006 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:17 crc kubenswrapper[4869]: E0929 13:41:17.438788 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Sep 29 13:41:17 crc kubenswrapper[4869]: W0929 13:41:17.791703 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:17 crc kubenswrapper[4869]: E0929 13:41:17.791807 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:18 crc kubenswrapper[4869]: W0929 13:41:18.115780 4869 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Sep 29 13:41:18 crc kubenswrapper[4869]: E0929 13:41:18.115910 4869 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 
38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.317107 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5"} Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.317314 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.318769 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.318809 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.318823 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.322019 4869 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3e13f0ebce94f42c289b365655745b59110311c71fa0192f321625b1aa3362b3" exitCode=0 Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.322148 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.322658 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323028 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3e13f0ebce94f42c289b365655745b59110311c71fa0192f321625b1aa3362b3"} Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323070 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323126 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323900 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.323911 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.324435 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.324460 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.324472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.324892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 
13:41:18.324916 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.324926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:18 crc kubenswrapper[4869]: I0929 13:41:18.958828 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330674 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3da5471e183539b1800dbaa6a498d78b591c6b6827d14839d6f46f5b2968db57"} Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330718 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330750 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"41dc260119407737a9665a4b4637c439cee2196ad4058797866bda85e10ed035"} Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330769 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330838 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330778 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ffb52fc63a404485ca140e035cd4f7a3912a0bcdfd0bed4bd4b075d8f32ca521"} Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.330909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"693c25accf24cd07b6e76ba762416fbeafe68ea357f3ccccfe0fedf4a051e115"} Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331640 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331666 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331677 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331918 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331955 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.331972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.583994 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.584227 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 
13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.585775 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.585881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:19 crc kubenswrapper[4869]: I0929 13:41:19.585893 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.337147 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.337176 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.337199 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.337148 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ebb4b6eec608673e81156a47001ec41f60c72d1085f9bbde5d5014f1c8a4ab67"} Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338083 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338121 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338136 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338179 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338216 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.338231 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.639929 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.641327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.641370 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.641382 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:20 crc kubenswrapper[4869]: I0929 13:41:20.641402 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.142797 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.339950 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.340856 4869 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.340895 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.340909 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.463840 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.464054 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.465148 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.465188 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:21 crc kubenswrapper[4869]: I0929 13:41:21.465206 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.342664 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.343928 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.343979 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.343993 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.584359 4869 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Sep 29 13:41:22 crc kubenswrapper[4869]: I0929 13:41:22.584564 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.142842 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.143014 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.143057 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.144136 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 
13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.144167 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.144179 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.223128 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.223309 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.224661 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.224724 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.224738 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.232910 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.345658 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.347293 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.347329 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:23 crc kubenswrapper[4869]: I0929 13:41:23.347340 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.252519 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.252792 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.253933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.253978 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.253988 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.280670 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.281008 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.282580 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.282659 4869 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.282671 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:24 crc kubenswrapper[4869]: E0929 13:41:24.318156 4869 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.511728 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.511989 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.513185 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.513221 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:24 crc kubenswrapper[4869]: I0929 13:41:24.513234 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.180420 4869 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.359207 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.361412 4869 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5" exitCode=255 Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.361460 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5"} Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.361646 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.362479 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.362516 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.362530 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.363206 4869 scope.go:117] "RemoveContainer" containerID="e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.543928 4869 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 
403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.543999 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.548738 4869 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.549025 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.967848 4869 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]log ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]etcd ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-api-request-count-filter ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-startkubeinformers ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-apiserver-admission-initializer ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/generic-apiserver-start-informers ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/priority-and-fairness-config-consumer ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/priority-and-fairness-filter ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/storage-object-count-tracker-hook ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-apiextensions-informers ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-apiextensions-controllers ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/crd-informer-synced ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-system-namespaces-controller ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-cluster-authentication-info-controller ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-legacy-token-tracking-controller ok Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-service-ip-repair-controllers ok Sep 29 
13:41:28 crc kubenswrapper[4869]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Sep 29 13:41:28 crc kubenswrapper[4869]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/priority-and-fairness-config-producer ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/bootstrap-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/start-kube-aggregator-informers ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-status-local-available-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-status-remote-available-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-registration-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-wait-for-first-sync ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-discovery-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/kube-apiserver-autoregistration ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]autoregister-completion ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-openapi-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: [+]poststarthook/apiservice-openapiv3-controller ok
Sep 29 13:41:28 crc kubenswrapper[4869]: livez check failed
Sep 29 13:41:28 crc kubenswrapper[4869]: I0929 13:41:28.967969 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.366712 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.369388 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566"}
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.369569 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.370510 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.370541 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:29 crc kubenswrapper[4869]: I0929 13:41:29.370554 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.169526 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.169773 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.171125 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.171187 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.171206 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.181666 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.374248 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.375834 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.375894 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:31 crc kubenswrapper[4869]: I0929 13:41:31.375908 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:32 crc kubenswrapper[4869]: I0929 13:41:32.585747 4869 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Sep 29 13:41:32 crc kubenswrapper[4869]: I0929 13:41:32.585910 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Sep 29 13:41:33 crc kubenswrapper[4869]: E0929 13:41:33.539652 4869 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.543018 4869 trace.go:236] Trace[889877883]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Sep-2025 13:41:18.811) (total time: 14731ms):
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[889877883]: ---"Objects listed" error: 14731ms (13:41:33.542)
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[889877883]: [14.731445666s] [14.731445666s] END
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.543066 4869 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.543499 4869 trace.go:236] Trace[1799120868]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Sep-2025 13:41:18.813) (total time: 14730ms):
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[1799120868]: ---"Objects listed" error: 14730ms (13:41:33.543)
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[1799120868]: [14.730125995s] [14.730125995s] END
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.543533 4869 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.545016 4869 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.545260 4869 trace.go:236] Trace[634712227]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Sep-2025 13:41:21.606) (total time: 11938ms):
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[634712227]: ---"Objects listed" error: 11938ms (13:41:33.545)
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[634712227]: [11.93878634s] [11.93878634s] END
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.545280 4869 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Sep 29 13:41:33 crc kubenswrapper[4869]: E0929 13:41:33.546507 4869 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.548051 4869 trace.go:236] Trace[390201041]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Sep-2025 13:41:22.570) (total time: 10977ms):
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[390201041]: ---"Objects listed" error: 10977ms (13:41:33.547)
Sep 29 13:41:33 crc kubenswrapper[4869]: Trace[390201041]: [10.977439958s] [10.977439958s] END
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.548424 4869 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.963842 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.964315 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 29 13:41:33 crc kubenswrapper[4869]: I0929 13:41:33.967564 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.170654 4869 apiserver.go:52] "Watching apiserver"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.173673 4869 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.174027 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"]
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.174733 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.174803 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.175011 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.175472 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.175808 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.175860 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.175930 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.175977 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.176044 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178333 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178733 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178783 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178802 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178733 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178899 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.178996 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.179043 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.182495 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.187802 4869 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.224764 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.244852 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249750 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249789 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249813 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249831 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249849 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249865 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249880 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.249896 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.250100 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.250213 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.250339 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.250530 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251035 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251043 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251078 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251106 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251128 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251153 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251178 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251289 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251373 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251527 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251561 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251565 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251582 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251625 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251653 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251676 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251713 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251745 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251770 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251791 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251812 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251833 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251854 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251879 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251899 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251918 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251938 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.251962 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252018 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252050 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252134 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252162 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252216 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252241 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252264 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252289 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252315 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252342 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252369 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252396 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252417 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252443 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252472 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252503 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252530 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252552 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252576 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252619 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252647 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252670 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252699 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252721 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252743 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252769 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252790 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252814 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252872 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252903 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252929 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252952 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252974 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252999 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253022 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253047 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253106 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253137 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253162 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253187 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253249 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253289 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252006 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256226 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252049 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252167 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252259 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252305 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252651 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252661 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252709 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252780 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252816 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.252936 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253007 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253041 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253182 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253238 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253538 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253557 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253701 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.253810 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.254047 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.254666 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.254741 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.254849 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255086 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255104 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255138 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255302 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255390 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255505 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255621 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256418 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255595 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255802 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.255908 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256108 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256164 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256515 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256235 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.256672 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257007 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257024 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257059 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257156 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257247 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257472 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257691 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257730 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.257750 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258475 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258579 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258693 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258746 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258774 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258796 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258866 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258926 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258803 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258982 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259193 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.258954 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259269 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259285 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259469 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259409 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259293 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259535 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259570 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259644 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259824 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259864 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.259989 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260137 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260166 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260204 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") 
pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260257 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260288 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260317 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260343 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260415 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260479 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260573 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260651 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260718 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260786 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260883 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260912 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.260949 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261061 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261095 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261118 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261143 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261171 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261198 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261220 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261247 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261285 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261360 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261386 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261411 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261439 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261460 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: 
\"5b88f790-22fa-440e-b583-365168c0b23d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261483 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261554 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261576 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261670 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261766 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.263471 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.263598 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.264016 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.264093 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.264360 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.264504 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.264880 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265153 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265271 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265275 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265774 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265855 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265856 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.266645 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267034 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267259 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267340 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267066 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.266844 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267620 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.267817 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268018 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268062 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268074 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268044 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268387 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268415 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268426 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268521 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.268577 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.269509 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.269595 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.269723 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.269888 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.269879 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270003 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.261795 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270092 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270125 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270007 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270048 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270160 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270060 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270179 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.265898 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270156 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270403 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270439 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270470 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270491 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270510 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270533 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270558 4869 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270580 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270600 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270631 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270651 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270669 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270685 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270708 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270728 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270748 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270771 4869 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270792 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270811 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270829 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270467 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270494 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270534 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270550 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270785 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270835 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270848 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.270928 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271045 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271089 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271123 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271146 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271165 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271185 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271203 4869 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271222 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271242 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271261 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271318 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271349 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271370 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271391 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271345 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271329 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271520 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271681 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271714 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271740 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271419 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271864 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271966 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271992 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272015 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272040 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272063 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272088 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272109 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272129 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272150 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272169 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272209 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272229 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272249 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272271 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272290 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272309 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272330 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272352 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272371 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272395 4869 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272413 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272438 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272461 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272488 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272507 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272541 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272559 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272578 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272598 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 
13:41:34.272639 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272667 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272694 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272714 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272760 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272797 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272867 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272948 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272970 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272998 4869 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273026 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273046 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273068 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273095 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273123 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273142 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273167 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273214 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: 
\"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273387 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272011 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272039 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.271991 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272304 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272446 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272431 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272699 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272732 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272893 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.272751 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273146 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273158 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.273373 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:41:34.773347851 +0000 UTC m=+21.213992171 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274123 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274279 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274328 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.274414 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274456 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274534 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.274984 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.275126 4869 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.275295 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.275628 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.275923 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.276372 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.276731 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.276942 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277098 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277135 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273498 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273512 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273321 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.273795 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277195 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277231 4869 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277257 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277276 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277294 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277314 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 
13:41:34.277333 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277354 4869 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277365 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277376 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277403 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277419 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277435 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277452 4869 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277473 4869 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277733 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.277962 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.278210 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.278316 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.278651 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.278806 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.278975 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.279065 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.279182 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.280137 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281075 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281457 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281514 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281749 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.281816 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:34.781788062 +0000 UTC m=+21.222432382 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281774 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281941 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281933 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.281954 4869 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282039 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282144 4869 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282170 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.282172 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282183 4869 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282207 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282225 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.282228 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:34.782219093 +0000 UTC m=+21.222863413 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282223 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282252 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282289 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282305 4869 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282318 4869 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282371 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282385 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282433 4869 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282443 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282453 4869 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282463 4869 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282575 4869 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282587 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282588 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod 
"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282597 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282650 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282669 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282682 4869 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282696 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282709 4869 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282720 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282732 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282742 4869 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282752 4869 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282772 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282788 4869 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282806 4869 reconciler_common.go:293] 
"Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282821 4869 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282836 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282851 4869 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282866 4869 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282880 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282893 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282905 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282926 4869 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282939 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282951 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282962 4869 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.282973 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 
13:41:34.282991 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283013 4869 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283027 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283045 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283059 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283072 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283086 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283101 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283119 4869 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283129 4869 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283139 4869 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283148 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283159 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283170 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: 
\"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283181 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283193 4869 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283204 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283217 4869 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283229 4869 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283238 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283249 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283253 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283258 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283298 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283313 4869 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283326 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283338 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283350 4869 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283360 4869 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283371 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283381 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283392 4869 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283402 4869 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283415 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283426 4869 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283435 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283446 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283457 4869 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283466 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283475 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283486 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283496 4869 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283512 4869 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283525 4869 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283538 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283550 4869 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283561 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283574 4869 reconciler_common.go:293] "Volume detached for volume 
\"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283587 4869 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283599 4869 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283637 4869 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283648 4869 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283657 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283666 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283676 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283685 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283694 4869 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283703 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283712 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283721 4869 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283730 4869 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283739 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283748 4869 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283759 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.283820 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.285590 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.285865 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.287106 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.287475 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.287776 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.289142 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.289385 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.295256 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.296069 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.296164 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296224 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296366 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296436 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296638 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:34.796599588 +0000 UTC m=+21.237243908 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.296629 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.296387 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296878 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296914 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296929 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.296993 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:34.796972318 +0000 UTC m=+21.237616638 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.297179 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.297755 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.297923 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.298503 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.299394 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.299531 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.299687 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.299992 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.300227 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.300570 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.301044 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.302236 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.302839 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.318097 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.321239 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.327214 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.332185 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.332768 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.336928 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85
d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.346831 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.357579 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.369806 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.382268 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.384739 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.384897 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.384841 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385064 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385468 4869 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385494 4869 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385507 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385521 4869 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385533 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385545 4869 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385558 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385570 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385580 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385594 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385606 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385637 4869 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385650 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385667 4869 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385679 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385695 4869 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385724 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385735 4869 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385746 4869 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385756 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385767 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385790 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385823 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385854 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385871 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385882 4869 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385893 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385905 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385916 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385927 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385938 4869 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385955 4869 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.385985 4869 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386002 4869 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386014 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386025 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386053 4869 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386064 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386074 4869 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386092 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386106 4869 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386116 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386127 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: 
\"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386138 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386150 4869 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386163 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386176 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386227 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386286 4869 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386333 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386345 4869 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386362 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386373 4869 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386383 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386394 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386404 4869 reconciler_common.go:293] "Volume detached for volume 
\"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386414 4869 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386425 4869 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386436 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386447 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386473 4869 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386486 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386498 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386510 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386522 4869 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386533 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386547 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386558 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386570 4869 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386580 4869 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.386591 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.392658 4869 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.393754 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.405840 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.418050 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.429501 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.441320 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.452813 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.464077 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.474400 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.490719 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.499290 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Sep 29 13:41:34 crc kubenswrapper[4869]: W0929 13:41:34.504786 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-5324c8556f7cb36d0a4640bc69048c29cb0c8dc3ba81cdf793cde8b07f370187 WatchSource:0}: Error finding container 5324c8556f7cb36d0a4640bc69048c29cb0c8dc3ba81cdf793cde8b07f370187: Status 404 returned error can't find the container with id 5324c8556f7cb36d0a4640bc69048c29cb0c8dc3ba81cdf793cde8b07f370187 Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.508126 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Sep 29 13:41:34 crc kubenswrapper[4869]: W0929 13:41:34.513510 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-85878f0c7edfd02ee45fb2806c2b4cedf2058abec9df92ccaf9fda397a23c0e1 WatchSource:0}: Error finding container 85878f0c7edfd02ee45fb2806c2b4cedf2058abec9df92ccaf9fda397a23c0e1: Status 404 returned error can't find the container with id 85878f0c7edfd02ee45fb2806c2b4cedf2058abec9df92ccaf9fda397a23c0e1 Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.520091 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:34 crc kubenswrapper[4869]: W0929 13:41:34.531994 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-5e9a7f00433ce5d5fdec87a684964b581b5b053290fcdf2b55f8488683153e00 WatchSource:0}: Error finding container 5e9a7f00433ce5d5fdec87a684964b581b5b053290fcdf2b55f8488683153e00: Status 404 returned error can't find the container with id 5e9a7f00433ce5d5fdec87a684964b581b5b053290fcdf2b55f8488683153e00 Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.719035 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.728391 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.743935 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.758016 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.772057 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.788266 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.790336 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.790402 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.790453 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.790590 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:41:35.790555523 +0000 UTC m=+22.231199853 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.790602 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.790719 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.790818 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:35.790779919 +0000 UTC m=+22.231424299 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.790883 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:35.790872241 +0000 UTC m=+22.231516571 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.815030 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.826506 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.890942 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:34 crc kubenswrapper[4869]: I0929 13:41:34.890985 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891140 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891166 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891179 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891260 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:35.89124292 +0000 UTC m=+22.331887240 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891423 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891475 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891492 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:34 crc kubenswrapper[4869]: E0929 13:41:34.891574 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:35.891543177 +0000 UTC m=+22.332187677 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.240958 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.241103 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.389186 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.389255 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.389272 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5e9a7f00433ce5d5fdec87a684964b581b5b053290fcdf2b55f8488683153e00"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.390753 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"85878f0c7edfd02ee45fb2806c2b4cedf2058abec9df92ccaf9fda397a23c0e1"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.391856 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.391890 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5324c8556f7cb36d0a4640bc69048c29cb0c8dc3ba81cdf793cde8b07f370187"} Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.404398 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.423146 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.447962 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.464838 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.479447 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.499741 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.513220 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.528539 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.545925 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.562584 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.578070 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.591224 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.608285 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.623580 4869 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.638573 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.653369 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:35Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.796963 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.797121 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.797195 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:41:37.797167301 +0000 UTC m=+24.237811631 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.797252 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.797290 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.797365 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:37.797342946 +0000 UTC m=+24.237987266 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.797520 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.797674 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:37.797651484 +0000 UTC m=+24.238295804 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.898194 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:35 crc kubenswrapper[4869]: I0929 13:41:35.898260 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898414 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898433 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898445 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898457 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898498 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898505 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:37.898487654 +0000 UTC m=+24.339131974 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898511 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:35 crc kubenswrapper[4869]: E0929 13:41:35.898592 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:37.898572237 +0000 UTC m=+24.339216557 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.241733 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.241755 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:36 crc kubenswrapper[4869]: E0929 13:41:36.241907 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:36 crc kubenswrapper[4869]: E0929 13:41:36.242038 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.245493 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.246134 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.246958 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.247528 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.249198 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.249856 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.250979 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.251636 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.252703 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.253258 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.254324 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.255087 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.255984 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.256494 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.257004 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.257942 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.258644 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.259398 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.259955 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.260555 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.261487 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.262165 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.262597 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.263662 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.264059 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.265096 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.265734 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.266750 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.267288 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.268193 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.268762 4869 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.268861 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.271198 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.271713 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.272116 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.274136 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.275218 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.275796 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.276810 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.277464 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.278638 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.279281 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.280453 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.281436 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.282282 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.283344 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.284673 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.286002 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.287462 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.288030 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.289001 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.289515 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.290675 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Sep 29 13:41:36 crc kubenswrapper[4869]: I0929 13:41:36.291254 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.241072 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.241215 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.398496 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94"} Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.413715 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube
-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.432569 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\"
:\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.447371 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.462955 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.474802 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.486592 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.499285 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.511131 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:37Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.812129 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.812269 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.812311 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.812354 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:41:41.812318417 +0000 UTC m=+28.252962737 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.812410 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.812433 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.812485 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:41.812468111 +0000 UTC m=+28.253112431 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.812501 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:41.812495262 +0000 UTC m=+28.253139582 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.913372 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:41:37 crc kubenswrapper[4869]: I0929 13:41:37.913423 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913576 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913575 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913593 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913600 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913623 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913634 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913675 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:41.913660251 +0000 UTC m=+28.354304571 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:41:37 crc kubenswrapper[4869]: E0929 13:41:37.913688 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:41.913683211 +0000 UTC m=+28.354327531 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.142128 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-vpmmf"]
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.142931 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-vpmmf"
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.145189 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.145235 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.148826 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.163317 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.182230 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.197430 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.212455 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.225666 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.241824 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:38 crc kubenswrapper[4869]: E0929 13:41:38.241974 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.242282 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:38 crc kubenswrapper[4869]: E0929 13:41:38.242571 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.243697 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.258603 4869 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.282864 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready 
status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.307509 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.316957 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkpst\" (UniqueName: \"kubernetes.io/projected/68cd537c-0a3f-4835-83bc-a45ae449712b-kube-api-access-wkpst\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.317029 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/68cd537c-0a3f-4835-83bc-a45ae449712b-hosts-file\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.417660 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkpst\" (UniqueName: \"kubernetes.io/projected/68cd537c-0a3f-4835-83bc-a45ae449712b-kube-api-access-wkpst\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.417742 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/68cd537c-0a3f-4835-83bc-a45ae449712b-hosts-file\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.417823 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/68cd537c-0a3f-4835-83bc-a45ae449712b-hosts-file\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.447093 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkpst\" (UniqueName: \"kubernetes.io/projected/68cd537c-0a3f-4835-83bc-a45ae449712b-kube-api-access-wkpst\") pod \"node-resolver-vpmmf\" (UID: \"68cd537c-0a3f-4835-83bc-a45ae449712b\") " pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.457858 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-vpmmf" Sep 29 13:41:38 crc kubenswrapper[4869]: W0929 13:41:38.488632 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68cd537c_0a3f_4835_83bc_a45ae449712b.slice/crio-bfec37140250907afd52f2c57605b62485dd92fc7016a2d45fbdd427f4cd99a4 WatchSource:0}: Error finding container bfec37140250907afd52f2c57605b62485dd92fc7016a2d45fbdd427f4cd99a4: Status 404 returned error can't find the container with id bfec37140250907afd52f2c57605b62485dd92fc7016a2d45fbdd427f4cd99a4 Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.557660 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gsck4"] Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.558434 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-mrhp2"] Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.558725 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-vs8mc"] Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.558995 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.559471 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.560046 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.562465 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.563370 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.577463 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.578820 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.578947 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.579245 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.579676 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.579891 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.579883 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.580063 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Sep 29 13:41:38 
crc kubenswrapper[4869]: I0929 13:41:38.580161 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.581358 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.666931 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc4782
74c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.692972 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721047 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-conf-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721104 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-cnibin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721133 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-multus\") pod \"multus-vs8mc\" (UID: 
\"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721160 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-daemon-config\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721186 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-system-cni-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721223 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721330 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-etc-kubernetes\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721430 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721460 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-proxy-tls\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721481 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-k8s-cni-cncf-io\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721505 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-cnibin\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721524 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721555 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-rootfs\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721575 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z62t\" (UniqueName: \"kubernetes.io/projected/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-kube-api-access-7z62t\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721617 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-hostroot\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721748 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-multus-certs\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721786 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x49dh\" (UniqueName: \"kubernetes.io/projected/0e924d34-8790-41e8-a11a-91a1d0c625ca-kube-api-access-x49dh\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721832 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-os-release\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721854 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-socket-dir-parent\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721898 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-cni-binary-copy\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721917 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-netns\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721933 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-kubelet\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721975 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-binary-copy\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.721999 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd462\" (UniqueName: \"kubernetes.io/projected/db4e9fcf-5399-4029-9409-22b45496d7c9-kube-api-access-qd462\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.722024 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-mcd-auth-proxy-config\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.722046 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-system-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.722130 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-os-release\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.722211 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-bin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.722366 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.738996 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.757385 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.772414 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.787270 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.802453 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.819729 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822731 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-conf-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822777 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-cnibin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822799 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-multus\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822819 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-daemon-config\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " 
pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822834 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-system-cni-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822852 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822882 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822898 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-etc-kubernetes\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822916 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-proxy-tls\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822938 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-k8s-cni-cncf-io\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822951 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-etc-kubernetes\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822981 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-cnibin\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822961 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-system-cni-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc 
kubenswrapper[4869]: I0929 13:41:38.822974 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-multus\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-cnibin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822908 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-conf-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.822957 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-cnibin\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823126 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823188 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-k8s-cni-cncf-io\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823239 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-rootfs\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823266 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823360 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z62t\" (UniqueName: \"kubernetes.io/projected/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-kube-api-access-7z62t\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823484 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rootfs\" (UniqueName: \"kubernetes.io/host-path/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-rootfs\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823514 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-hostroot\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823569 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-multus-certs\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823593 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x49dh\" (UniqueName: \"kubernetes.io/projected/0e924d34-8790-41e8-a11a-91a1d0c625ca-kube-api-access-x49dh\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823632 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-os-release\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823653 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-hostroot\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823661 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-socket-dir-parent\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823708 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-socket-dir-parent\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823714 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-netns\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823743 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-kubelet\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " 
pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823758 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823768 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-cni-binary-copy\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-binary-copy\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823858 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd462\" (UniqueName: \"kubernetes.io/projected/db4e9fcf-5399-4029-9409-22b45496d7c9-kube-api-access-qd462\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823894 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-mcd-auth-proxy-config\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823899 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-netns\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823917 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-system-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823933 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-kubelet\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823863 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " 
pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823950 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-os-release\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823954 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-run-multus-certs\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.823981 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-bin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824358 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-cni-binary-copy\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824061 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e924d34-8790-41e8-a11a-91a1d0c625ca-multus-daemon-config\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824138 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-os-release\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824217 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-host-var-lib-cni-bin\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824322 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/db4e9fcf-5399-4029-9409-22b45496d7c9-os-release\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824022 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e924d34-8790-41e8-a11a-91a1d0c625ca-system-cni-dir\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824511 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" 
(UniqueName: \"kubernetes.io/configmap/db4e9fcf-5399-4029-9409-22b45496d7c9-cni-binary-copy\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.824734 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-mcd-auth-proxy-config\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.829312 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-proxy-tls\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.836828 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.843599 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z62t\" (UniqueName: \"kubernetes.io/projected/c2cb4b77-d447-4866-ac1e-eb4f0b4babae-kube-api-access-7z62t\") pod \"machine-config-daemon-mrhp2\" (UID: \"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\") " pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.846843 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd462\" (UniqueName: \"kubernetes.io/projected/db4e9fcf-5399-4029-9409-22b45496d7c9-kube-api-access-qd462\") pod \"multus-additional-cni-plugins-gsck4\" (UID: \"db4e9fcf-5399-4029-9409-22b45496d7c9\") " pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.847978 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x49dh\" (UniqueName: \"kubernetes.io/projected/0e924d34-8790-41e8-a11a-91a1d0c625ca-kube-api-access-x49dh\") pod \"multus-vs8mc\" (UID: \"0e924d34-8790-41e8-a11a-91a1d0c625ca\") " pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.852549 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.864999 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.878142 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.884200 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-vs8mc" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.892373 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.902888 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gsck4" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.909497 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-pl
ugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a7
14c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.913708 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.926961 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.943441 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.955680 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mx9tj"] Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.956574 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959115 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959293 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959734 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959804 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959826 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.959936 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.961089 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Sep 29 13:41:38 crc kubenswrapper[4869]: I0929 13:41:38.964983 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:38Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.004926 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.032784 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.056727 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.076062 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\
"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.092559 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127543 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127626 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127660 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127697 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127804 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127861 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127884 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127910 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127927 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127911 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.127964 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128280 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128314 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128336 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128357 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btwpm\" (UniqueName: \"kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128410 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128429 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128455 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128485 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128501 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.128522 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.145737 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.158329 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.173386 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.195060 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.217939 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229108 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btwpm\" (UniqueName: \"kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229165 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229182 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229206 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229228 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229246 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229264 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 
13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229289 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229311 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229327 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229334 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229343 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229334 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229375 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229374 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229426 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229437 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229431 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229487 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229402 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229516 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229540 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229543 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229563 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229574 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229582 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229602 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229635 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229655 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229679 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229700 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229711 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229719 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.229786 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.230407 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.230511 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.230524 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.232634 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.234561 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.241697 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:39 crc kubenswrapper[4869]: E0929 13:41:39.241868 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.251272 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btwpm\" (UniqueName: \"kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm\") pod \"ovnkube-node-mx9tj\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.255666 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.270179 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.284536 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.297962 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.302236 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.313457 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: W0929 13:41:39.325013 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d03c451_25ce_46f9_9a14_f2ee29a89521.slice/crio-b638d05b433da167ad79d9cca68b9d3ad7cb45b26dc5a45c5d2bfb2c9b1ed1f2 WatchSource:0}: Error finding container b638d05b433da167ad79d9cca68b9d3ad7cb45b26dc5a45c5d2bfb2c9b1ed1f2: Status 404 returned error can't find the container with id b638d05b433da167ad79d9cca68b9d3ad7cb45b26dc5a45c5d2bfb2c9b1ed1f2 Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.329374 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.348657 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.405789 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.405849 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.405860 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"b5909592aca7f5f3cc7eef81c99887bb2c53eb7c5e5ecef7538b2babfac27862"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.407965 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vpmmf" event={"ID":"68cd537c-0a3f-4835-83bc-a45ae449712b","Type":"ContainerStarted","Data":"c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.407999 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vpmmf" event={"ID":"68cd537c-0a3f-4835-83bc-a45ae449712b","Type":"ContainerStarted","Data":"bfec37140250907afd52f2c57605b62485dd92fc7016a2d45fbdd427f4cd99a4"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.409386 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"b638d05b433da167ad79d9cca68b9d3ad7cb45b26dc5a45c5d2bfb2c9b1ed1f2"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.410271 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerStarted","Data":"45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.410307 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerStarted","Data":"1beada7cb44717c6bf08069042b3984d814640e92eedb43a5a3bdec02a8f1c57"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.411482 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerStarted","Data":"599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.411508 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerStarted","Data":"120482f4bc45a2d05ac348c76e7741ecb99d21053721746904f7f79a89066df3"} Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.420786 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.434965 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.454145 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.470083 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.514007 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.540994 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.559780 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.577881 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.588899 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.592954 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.599263 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.616695 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.643573 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\
\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.659070 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.675043 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.692120 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.707237 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.723673 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.740208 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.756464 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.773907 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.785037 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.800777 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.817294 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.832015 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.848713 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.862105 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.887922 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\
\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.947372 4869 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.949366 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.949414 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.949425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.949559 4869 kubelet_node_status.go:76] "Attempting to register node" node="crc" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.957818 4869 kubelet_node_status.go:115] "Node was previously registered" node="crc" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.958149 4869 kubelet_node_status.go:79] "Successfully registered node" node="crc" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.959424 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.959472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.959484 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.959501 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.959511 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:39Z","lastTransitionTime":"2025-09-29T13:41:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:39 crc kubenswrapper[4869]: E0929 13:41:39.981393 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.985207 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.985269 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.985283 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.985301 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:39 crc kubenswrapper[4869]: I0929 13:41:39.985314 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:39Z","lastTransitionTime":"2025-09-29T13:41:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:39 crc kubenswrapper[4869]: E0929 13:41:39.998815 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.002850 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.002877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.002902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.002920 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.002933 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.022787 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.027072 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.027124 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.027136 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.027155 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.027166 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.042945 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.049015 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.049063 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.049074 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.049096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.049111 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.063155 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.063280 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.065777 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
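[Annotation] The "Unable to update node status ... exceeds retry count" record above is the kubelet giving up after a fixed number of patch attempts (nodeStatusUpdateRetry, 5 in the upstream kubelet source). A minimal sketch of that bounded retry loop; updateNodeStatus and updateOnce are illustrative stand-ins, not the kubelet's actual function signatures:

// retry.go - sketch of the bounded node-status retry visible in this log:
// each attempt fails on the expired webhook cert, so the budget is exhausted.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // matches the upstream kubelet constant

func updateNodeStatus(updateOnce func(attempt int) error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := updateOnce(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// Every attempt fails the same way the log shows at 13:41:40.
	err := updateNodeStatus(func(attempt int) error {
		return fmt.Errorf("attempt %d: failed calling webhook: tls: certificate has expired", attempt)
	})
	fmt.Println(err)
}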
event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.065831 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.065841 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.065863 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.065877 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.169513 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.169563 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.169576 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.169639 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.169664 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.242164 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.242727 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.243213 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:40 crc kubenswrapper[4869]: E0929 13:41:40.243289 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.273094 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.273162 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.273180 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.273205 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.273220 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.375720 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.375769 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.375779 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.375797 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.375807 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.416033 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b" exitCode=0 Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.416112 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.417155 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567" exitCode=0 Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.417178 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.441148 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.454129 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.479364 4869 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.479396 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.479408 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.479425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.479434 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.480039 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.493812 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.506887 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.522836 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.545566 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.563589 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.579427 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.581764 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.581795 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.581805 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.581822 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.581833 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.592828 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.612685 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.625814 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.637375 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.651600 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.669099 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.684300 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.684343 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.684353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.684368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.684379 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.685490 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.699707 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.711950 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.725253 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPa
th\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.740873 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.753565 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.769085 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787050 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787345 4869 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787366 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787376 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787391 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.787403 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.808764 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.821344 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.836059 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.857139 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-8cjvt"] Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.857975 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.860091 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.860118 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.860155 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.862165 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.877584 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.890418 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.890468 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.890481 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.890532 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.890549 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.891050 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.905366 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.919086 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.934762 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.949315 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c6795986-350e-4a46-8196-eb5cb3ba018e-host\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.949382 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c6795986-350e-4a46-8196-eb5cb3ba018e-serviceca\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.949416 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqv6j\" (UniqueName: \"kubernetes.io/projected/c6795986-350e-4a46-8196-eb5cb3ba018e-kube-api-access-sqv6j\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.978459 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:40Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.993880 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.993936 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.993952 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.993970 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:40 crc kubenswrapper[4869]: I0929 13:41:40.993981 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:40Z","lastTransitionTime":"2025-09-29T13:41:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.015471 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.050750 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c6795986-350e-4a46-8196-eb5cb3ba018e-host\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.050824 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c6795986-350e-4a46-8196-eb5cb3ba018e-serviceca\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.050863 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqv6j\" (UniqueName: \"kubernetes.io/projected/c6795986-350e-4a46-8196-eb5cb3ba018e-kube-api-access-sqv6j\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.050956 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c6795986-350e-4a46-8196-eb5cb3ba018e-host\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.052361 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c6795986-350e-4a46-8196-eb5cb3ba018e-serviceca\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.055900 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.087228 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqv6j\" (UniqueName: \"kubernetes.io/projected/c6795986-350e-4a46-8196-eb5cb3ba018e-kube-api-access-sqv6j\") pod \"node-ca-8cjvt\" (UID: \"c6795986-350e-4a46-8196-eb5cb3ba018e\") " pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.097113 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.097154 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.097165 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.097183 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.097194 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.116656 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.159858 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2
af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.177100 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-8cjvt" Sep 29 13:41:41 crc kubenswrapper[4869]: W0929 13:41:41.192954 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6795986_350e_4a46_8196_eb5cb3ba018e.slice/crio-97f8d6b5be788da9c7c42be3ac9c171e40317c3848894aa02965620553e2280f WatchSource:0}: Error finding container 97f8d6b5be788da9c7c42be3ac9c171e40317c3848894aa02965620553e2280f: Status 404 returned error can't find the container with id 97f8d6b5be788da9c7c42be3ac9c171e40317c3848894aa02965620553e2280f Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.196623 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.199909 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.199965 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.199983 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.200003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.200015 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.233427 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.240724 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.240884 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.276444 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.302622 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.303085 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.303101 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.303127 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.303419 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.316000 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.406964 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.407042 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.407057 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.407086 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.407101 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.422316 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8cjvt" event={"ID":"c6795986-350e-4a46-8196-eb5cb3ba018e","Type":"ContainerStarted","Data":"97f8d6b5be788da9c7c42be3ac9c171e40317c3848894aa02965620553e2280f"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.427856 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.427908 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.427920 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.430851 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerStarted","Data":"4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.462662 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.485833 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.503133 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.510320 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.510377 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.510389 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.510407 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.510419 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.529125 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.548396 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.565103 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.597098 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.613847 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.613881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.613931 4869 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.613949 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.613961 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.635339 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.673969 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.714648 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.717796 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.717871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.717887 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.717903 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.717916 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.761965 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.795807 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.820529 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.820568 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.820577 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.820594 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.820605 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.838877 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.858550 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.858685 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:41:49.858660208 +0000 UTC m=+36.299304538 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.858727 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.858789 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.858870 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.858911 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:49.858903195 +0000 UTC m=+36.299547515 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.858871 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.859691 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:49.859519781 +0000 UTC m=+36.300164101 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.876868 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:41Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.924164 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.924209 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.924230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.924248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.924258 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:41Z","lastTransitionTime":"2025-09-29T13:41:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.959710 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:41 crc kubenswrapper[4869]: I0929 13:41:41.959789 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.959978 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960003 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960017 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960077 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:49.960058853 +0000 UTC m=+36.400703173 (durationBeforeRetry 8s). 
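The "No retries permitted until ... (durationBeforeRetry 8s)" lines show the volume manager's exponential backoff: each failed MountVolume/UnmountVolume attempt roughly doubles the wait and records a deadline before which the operation may not be retried. A sketch of that shape under stated assumptions (the 500ms initial delay and 2m cap are illustrative constants, not necessarily the kubelet's exact values):

package main

import (
	"fmt"
	"time"
)

// backoff tracks per-operation retry state the way the log's
// nestedpendingoperations entries do: after each failure the wait
// doubles up to a cap, and retries are refused until the deadline.
type backoff struct {
	duration time.Duration // durationBeforeRetry in the log
	deadline time.Time     // "no retries permitted until ..."
}

// Assumed constants for illustration only.
const (
	initialDelay = 500 * time.Millisecond
	maxDelay     = 2 * time.Minute
)

func (b *backoff) fail(now time.Time) {
	if b.duration == 0 {
		b.duration = initialDelay
	} else if b.duration *= 2; b.duration > maxDelay {
		b.duration = maxDelay
	}
	b.deadline = now.Add(b.duration)
}

func (b *backoff) mayRetry(now time.Time) bool { return !now.Before(b.deadline) }

func main() {
	var b backoff
	now := time.Now()
	for i := 0; i < 6; i++ {
		b.fail(now)
		fmt.Printf("attempt %d failed: durationBeforeRetry %s\n", i+1, b.duration)
		now = b.deadline // assume we wait exactly until the deadline
	}
	fmt.Println("may retry now:", b.mayRetry(now))
}

With these assumed constants the fifth failure yields an 8s wait (0.5s doubled four times), matching the durationBeforeRetry seen above.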
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960137 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960149 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960158 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:41 crc kubenswrapper[4869]: E0929 13:41:41.960186 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:49.960175846 +0000 UTC m=+36.400820166 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.027360 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.027419 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.027445 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.027480 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.027506 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.130601 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.130672 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.130685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.130706 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.130719 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.234259 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.234311 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.234325 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.234351 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.234366 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.241315 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:42 crc kubenswrapper[4869]: E0929 13:41:42.241516 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.242156 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:42 crc kubenswrapper[4869]: E0929 13:41:42.242270 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.337349 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.337437 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.337447 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.337464 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.337475 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.437721 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.437787 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.437800 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439852 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439890 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439900 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439934 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439948 4869 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439974 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.439957 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608" exitCode=0 Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.441521 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8cjvt" event={"ID":"c6795986-350e-4a46-8196-eb5cb3ba018e","Type":"ContainerStarted","Data":"fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.461334 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.475325 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.491794 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.503220 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.517430 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.533499 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.543460 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.543497 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.543507 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.543523 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.543533 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.549588 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.563913 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.583734 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.597662 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.613983 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.629516 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.649067 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.651096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.651249 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.651344 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.651455 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.651475 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.665010 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.679202 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.694573 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.708553 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.722787 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.738876 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.751903 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 
13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.754042 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.754071 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.754082 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.754096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.754106 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.766915 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\
"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.777049 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.790539 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.834163 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.857842 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.857890 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.857902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.857923 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.857936 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.875803 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\
\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.916533 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-
29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.953344 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\"
:\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.959957 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.960002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.960012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.960027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.960039 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:42Z","lastTransitionTime":"2025-09-29T13:41:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:42 crc kubenswrapper[4869]: I0929 13:41:42.998469 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:42Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.062274 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.062328 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.062344 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.062365 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.062382 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.164468 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.164508 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.164520 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.164536 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.164547 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.241144 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:43 crc kubenswrapper[4869]: E0929 13:41:43.241305 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.267076 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.267130 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.267149 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.267177 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.267193 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.369937 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.369978 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.369988 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.370003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.370014 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.448702 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6" exitCode=0 Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.448729 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.466019 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.472296 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.472338 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.472351 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.472367 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.472377 4869 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.483278 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.501105 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.514633 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.534847 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.551115 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.567322 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.574504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.574572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.574582 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.574600 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.574650 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.584876 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.599831 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.616146 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea
2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.638008 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.654768 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.671549 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.680953 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.681013 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.681027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.681052 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.681067 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.689259 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:43Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.783596 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 
13:41:43.783676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.783687 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.783707 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.783721 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.891599 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.891691 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.891708 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.891733 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.891749 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.995046 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.995288 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.995352 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.995444 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:43 crc kubenswrapper[4869]: I0929 13:41:43.995540 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:43Z","lastTransitionTime":"2025-09-29T13:41:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.098315 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.098379 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.098398 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.098424 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.098443 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.200953 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.201200 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.201271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.201302 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.201317 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.241092 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.241227 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:44 crc kubenswrapper[4869]: E0929 13:41:44.241277 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:44 crc kubenswrapper[4869]: E0929 13:41:44.241419 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.258758 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\
\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.258863 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.272389 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.290227 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.302726 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.303504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.303552 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.303563 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.303584 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.303598 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.316248 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.340856 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.357150 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.373416 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.388496 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.404517 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mo
untPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.406660 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.406708 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.406728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.406755 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.406770 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.419355 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.434507 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.447827 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.456777 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18" exitCode=0 Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.456851 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.461024 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.463072 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.477726 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.497415 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.509801 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.509865 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.509880 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.509903 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.509918 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.511017 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.525174 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.538143 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.550730 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.568602 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.582838 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.594093 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.611053 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":
{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.611929 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.611962 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.611972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.611990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.612003 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.621185 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.638342 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z 
is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.650900 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.674804 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.715252 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.715306 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.715317 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.715337 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.715351 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.818325 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.818365 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.818375 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.818391 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.818402 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.921649 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.921701 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.921713 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.921735 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:44 crc kubenswrapper[4869]: I0929 13:41:44.921754 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:44Z","lastTransitionTime":"2025-09-29T13:41:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.024861 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.024913 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.024925 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.024944 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.024955 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.127635 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.127692 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.127708 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.127724 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.127737 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.230503 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.230545 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.230556 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.230581 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.230595 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.240963 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:45 crc kubenswrapper[4869]: E0929 13:41:45.241122 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.338362 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.338784 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.338797 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.338816 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.338831 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.441974 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.442025 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.442039 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.442059 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.442073 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.470088 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043" exitCode=0 Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.470144 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.486510 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.502480 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.523200 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.542410 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.544309 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.544372 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.544388 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.544412 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.544434 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.555330 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.569676 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.588551 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.601319 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.625918 4869 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.644134 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791f
d90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.647307 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.647377 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.647390 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 
13:41:45.647413 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.647424 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.659431 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.673134 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.689270 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.703409 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:45Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.750420 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.750872 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.750969 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.751038 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.751102 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.854061 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.854332 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.854402 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.854490 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.854564 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.957286 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.957326 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.957338 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.957356 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:45 crc kubenswrapper[4869]: I0929 13:41:45.957367 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:45Z","lastTransitionTime":"2025-09-29T13:41:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.061110 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.061169 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.061189 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.061214 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.061235 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.164812 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.164854 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.164865 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.164881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.164891 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.241767 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:46 crc kubenswrapper[4869]: E0929 13:41:46.241907 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.242072 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:46 crc kubenswrapper[4869]: E0929 13:41:46.242285 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.267279 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.267325 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.267337 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.267354 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.267365 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.369871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.369922 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.369934 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.369954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.369967 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.473528 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.473577 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.473589 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.473623 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.473634 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.480834 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.481740 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.481797 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.488040 4869 generic.go:334] "Generic (PLEG): container finished" podID="db4e9fcf-5399-4029-9409-22b45496d7c9" containerID="d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44" exitCode=0 Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.488113 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerDied","Data":"d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.493976 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.512992 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2
a5b2ecdc7000e5626ea1c1ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.555302 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.556240 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.562570 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577190 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir
-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577478 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577522 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577534 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577556 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.577574 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.597989 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd4
62\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.611384 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.624019 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.637481 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.649735 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.661218 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680188 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680223 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680232 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680246 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680267 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.680897 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.692418 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.705282 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.714603 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.728385 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.743990 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.760904 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.774003 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.782470 4869 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.782545 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.782565 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.782595 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.782680 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.796960 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2
a5b2ecdc7000e5626ea1c1ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.811978 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.832881 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.849793 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.867415 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.881914 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.884824 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.884862 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.884874 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.884890 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.884902 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.897288 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.906974 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.918498 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.927908 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:46Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.988166 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.988210 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.988239 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.988258 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:46 crc kubenswrapper[4869]: I0929 13:41:46.988268 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:46Z","lastTransitionTime":"2025-09-29T13:41:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.091481 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.091528 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.091546 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.091568 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.091581 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.195150 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.195198 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.195211 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.195235 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.195249 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.240953 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:47 crc kubenswrapper[4869]: E0929 13:41:47.241124 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.297781 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.297815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.297824 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.297838 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.297847 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.401286 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.401335 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.401350 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.401367 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.401721 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.497070 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" event={"ID":"db4e9fcf-5399-4029-9409-22b45496d7c9","Type":"ContainerStarted","Data":"0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.497131 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.503972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.504017 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.504029 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.504046 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.504059 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.514383 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.531005 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.543113 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.553134 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.565604 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.578219 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.596389 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0
842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.606553 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.606587 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.606596 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.606642 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.606664 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.607927 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.626411 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.638368 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.650262 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.660901 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.671701 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.683802 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:47Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.709996 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.710045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.710056 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.710073 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.710085 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.812711 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.812776 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.812794 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.812815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.812828 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.916436 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.916486 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.916497 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.916513 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:47 crc kubenswrapper[4869]: I0929 13:41:47.916524 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:47Z","lastTransitionTime":"2025-09-29T13:41:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.018862 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.018906 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.018918 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.018938 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.018950 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.122453 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.122509 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.122523 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.122542 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.122553 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.225294 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.225338 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.225350 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.225369 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.225381 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.244311 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:48 crc kubenswrapper[4869]: E0929 13:41:48.244470 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.244549 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:48 crc kubenswrapper[4869]: E0929 13:41:48.244657 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.336575 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.336640 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.336656 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.336678 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.336693 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.439498 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.439536 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.439548 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.439565 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.439578 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.501102 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.543629 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.543677 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.543692 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.543712 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.543723 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.646860 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.646912 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.646921 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.646938 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.646949 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.749471 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.749515 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.749527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.749542 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.749554 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.852113 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.852162 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.852175 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.852191 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.852202 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.954549 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.954592 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.954604 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.954635 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:48 crc kubenswrapper[4869]: I0929 13:41:48.954650 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:48Z","lastTransitionTime":"2025-09-29T13:41:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.057392 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.057435 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.057444 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.057468 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.057480 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.159443 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.159480 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.159496 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.159514 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.159525 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.240774 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.240926 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.262508 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.262586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.262597 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.262631 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.262652 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.365666 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.365712 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.365722 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.365741 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.365752 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.468859 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.468902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.468911 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.468943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.468953 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.506282 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/0.log"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.508467 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee" exitCode=1
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.508507 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee"}
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.509327 4869 scope.go:117] "RemoveContainer" containerID="773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee"
Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.524142 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.538957 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.551724 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.563392 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.571747 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.571793 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.571807 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.571830 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.571846 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.582159 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c
922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.596711 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e9
5ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.615946 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2
a5b2ecdc7000e5626ea1c1ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 
13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.630427 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.645392 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.658990 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.672049 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.674342 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.674372 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.674382 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.674397 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.674409 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.688984 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.703476 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.716654 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:49Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.777401 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.777457 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.777480 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.777500 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.777518 4869 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.862382 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.862654 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:42:05.862594985 +0000 UTC m=+52.303239345 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.863079 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.863150 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.863325 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.863405 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:05.863389966 +0000 UTC m=+52.304034326 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.863397 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.863585 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:05.8635429 +0000 UTC m=+52.304187260 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.880875 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.880933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.880948 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.880973 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.880986 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.964751 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.964839 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.964993 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965013 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965020 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965078 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965026 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965196 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:05.965158761 +0000 UTC m=+52.405803081 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965093 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:49 crc kubenswrapper[4869]: E0929 13:41:49.965394 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:05.965350516 +0000 UTC m=+52.405994846 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.984364 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.984419 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.984435 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.984462 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:49 crc kubenswrapper[4869]: I0929 13:41:49.984483 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:49Z","lastTransitionTime":"2025-09-29T13:41:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.088088 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.088142 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.088154 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.088173 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.088186 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.191381 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.191429 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.191443 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.191467 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.191482 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.192622 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.192674 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.192703 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.192728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.192743 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.214367 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.222245 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.222305 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.222318 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.222346 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.222362 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.240872 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.240925 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.241045 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.241209 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.246937 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.253379 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.253415 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.253426 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.253445 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.253457 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.270094 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.275406 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.275464 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.275477 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.275494 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.275504 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.289792 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.294724 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.294767 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.294778 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.294797 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.294811 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.308879 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: E0929 13:41:50.309043 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.310764 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.310793 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.310802 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.310817 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.310827 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.412831 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.412871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.412881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.412896 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.412907 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.513145 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/0.log" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.514483 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.514541 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.514555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.514571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.514581 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.515604 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.515784 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.535157 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf1
36ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 
13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.547284 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.549040 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr"] Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.549624 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.551495 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.551690 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.562308 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.575371 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.585729 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.597175 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"sta
rtedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.608951 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.616977 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.617027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.617037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.617071 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.617082 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.621963 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.632978 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.643943 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.656336 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e
6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.664501 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.672415 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-env-overrides\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.672512 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c9bccd33-a790-4ed3-a942-b08394a00913-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.672539 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh6wn\" (UniqueName: \"kubernetes.io/projected/c9bccd33-a790-4ed3-a942-b08394a00913-kube-api-access-fh6wn\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.672597 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.674204 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.688085 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.700167 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.711360 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.719655 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.719709 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.719719 4869 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.719735 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.719746 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.723015 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.735298 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.745308 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.753696 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.767057 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.773706 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c9bccd33-a790-4ed3-a942-b08394a00913-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.773763 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh6wn\" (UniqueName: \"kubernetes.io/projected/c9bccd33-a790-4ed3-a942-b08394a00913-kube-api-access-fh6wn\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.773794 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.773831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-env-overrides\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.774527 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-env-overrides\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.783050 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c9bccd33-a790-4ed3-a942-b08394a00913-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.785131 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c9bccd33-a790-4ed3-a942-b08394a00913-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.785844 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.796446 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.797981 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh6wn\" (UniqueName: \"kubernetes.io/projected/c9bccd33-a790-4ed3-a942-b08394a00913-kube-api-access-fh6wn\") pod \"ovnkube-control-plane-749d76644c-q4mtr\" (UID: \"c9bccd33-a790-4ed3-a942-b08394a00913\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.811468 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMou
nts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.821630 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.821692 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.821707 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.821728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.821743 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.824656 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.843346 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 
13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.855928 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.862493 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.872209 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: W0929 13:41:50.875037 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9bccd33_a790_4ed3_a942_b08394a00913.slice/crio-6dfaffe657705d210a48b890b6661add31f2dbc733d0786665975e53a482ab90 WatchSource:0}: Error finding container 6dfaffe657705d210a48b890b6661add31f2dbc733d0786665975e53a482ab90: Status 404 returned error can't find the container with id 6dfaffe657705d210a48b890b6661add31f2dbc733d0786665975e53a482ab90 Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.887210 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.924425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.924477 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.924490 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.924510 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:50 crc kubenswrapper[4869]: I0929 13:41:50.924522 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:50Z","lastTransitionTime":"2025-09-29T13:41:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.027200 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.027266 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.027282 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.027304 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.027318 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.129586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.129647 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.129664 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.129688 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.129741 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.232453 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.232505 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.232518 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.232539 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.232550 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.241304 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:51 crc kubenswrapper[4869]: E0929 13:41:51.241547 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.335503 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.335569 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.335583 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.335602 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.335635 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.437735 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.437773 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.437784 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.437802 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.437813 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.521136 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" event={"ID":"c9bccd33-a790-4ed3-a942-b08394a00913","Type":"ContainerStarted","Data":"634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.521200 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" event={"ID":"c9bccd33-a790-4ed3-a942-b08394a00913","Type":"ContainerStarted","Data":"6dfaffe657705d210a48b890b6661add31f2dbc733d0786665975e53a482ab90"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.523003 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/1.log" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.523815 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/0.log" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.527628 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59" exitCode=1 Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.527686 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.527744 4869 scope.go:117] "RemoveContainer" containerID="773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.530029 4869 scope.go:117] "RemoveContainer" containerID="cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59" Sep 29 13:41:51 crc kubenswrapper[4869]: E0929 13:41:51.530295 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.540755 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.540814 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.540833 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.540857 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.540879 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.547683 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41
:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.562169 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.576310 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.588520 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.602108 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.615811 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.633537 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0
842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.650471 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.650522 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.650534 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.650553 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.650564 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.654104 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.674490 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 
13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.689873 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.705995 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.719975 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.730987 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.745327 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.752955 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.753003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.753017 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.753039 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.753052 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.760501 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:51Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.855877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 
13:41:51.855913 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.855926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.855943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.855954 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.958229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.958269 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.958280 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.958295 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:51 crc kubenswrapper[4869]: I0929 13:41:51.958307 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:51Z","lastTransitionTime":"2025-09-29T13:41:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.060591 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.060648 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.060658 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.060674 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.060687 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.164683 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.164760 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.164774 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.164797 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.164809 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.240969 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.241085 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.241185 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.241246 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.267815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.267886 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.267901 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.267927 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.267941 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.370493 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.370540 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.370548 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.370581 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.370592 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.383262 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-mxqkf"] Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.383977 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.384132 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.401300 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.415247 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.429791 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.441465 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.457041 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.472766 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.472811 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.472853 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.472823 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.472890 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.473011 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.484637 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.492874 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb6gv\" (UniqueName: \"kubernetes.io/projected/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-kube-api-access-qb6gv\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.492980 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") 
" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.499195 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.517709 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.535792 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" event={"ID":"c9bccd33-a790-4ed3-a942-b08394a00913","Type":"ContainerStarted","Data":"9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.537046 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.538678 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/1.log" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.557562 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.576254 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.576310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.576340 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.576705 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.576725 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.589025 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:
41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.594000 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.594152 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:53.094123859 +0000 UTC m=+39.534768179 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.594246 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.594357 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb6gv\" (UniqueName: \"kubernetes.io/projected/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-kube-api-access-qb6gv\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.605683 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.620735 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb6gv\" (UniqueName: \"kubernetes.io/projected/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-kube-api-access-qb6gv\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.634165 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf1
36ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] 
Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"19
2.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.648063 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.662252 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681087 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681090 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681129 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681298 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681317 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.681329 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.694436 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.709151 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.723685 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.738942 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.753704 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.768211 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.780245 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.783861 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.783903 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.783913 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.783933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.783944 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.794305 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.797366 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.798165 4869 scope.go:117] "RemoveContainer" containerID="cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59" Sep 29 13:41:52 crc kubenswrapper[4869]: E0929 13:41:52.798345 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.809451 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.829808 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773ba638422782e073275aa74209b0597adc47f2a5b2ecdc7000e5626ea1c1ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:49Z\\\",\\\"message\\\":\\\"ector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:41:49.000723 6161 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0929 13:41:49.000806 6161 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:41:49.000830 6161 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:41:49.000874 6161 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0929 13:41:49.000905 6161 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:41:49.000937 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0929 13:41:49.000988 6161 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:41:49.000994 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:41:49.001009 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:41:49.001041 6161 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:41:49.001052 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:41:49.001064 6161 factory.go:656] Stopping watch factory\\\\nI0929 13:41:49.001066 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:41:49.001077 6161 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:41:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} 
name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\
":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.842581 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 
13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.854405 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.868747 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.886843 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.886892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.886905 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.886925 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.886936 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.887325 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:
41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.903541 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.916455 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.931539 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.943398 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.958236 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.973042 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.989167 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.989217 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.989229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.989248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.989259 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:52Z","lastTransitionTime":"2025-09-29T13:41:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:52 crc kubenswrapper[4869]: I0929 13:41:52.994500 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:52Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.011698 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.024404 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.040210 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.051985 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.065413 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.081322 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.091749 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.091783 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.091795 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.091815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.091828 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.098025 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.099415 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:41:53 crc kubenswrapper[4869]: E0929 13:41:53.099660 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 29 13:41:53 crc kubenswrapper[4869]: E0929 13:41:53.099790 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:54.099760359 +0000 UTC m=+40.540404879 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.112250 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.131410 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z"
Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.144542 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:53Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.194241 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.194291 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.194303 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.194322 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.194333 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.240884 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:53 crc kubenswrapper[4869]: E0929 13:41:53.241095 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.296696 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.296763 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.296775 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.296792 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.296805 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.400105 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.400552 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.400565 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.400584 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.400594 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.503853 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.503912 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.503923 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.503943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.503970 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.607033 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.607076 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.607089 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.607111 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.607124 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.710502 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.710549 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.710562 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.710632 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.710648 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.813809 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.813854 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.813864 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.813881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.813895 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.916749 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.916833 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.916857 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.916888 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:53 crc kubenswrapper[4869]: I0929 13:41:53.916906 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:53Z","lastTransitionTime":"2025-09-29T13:41:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.020995 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.021032 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.021045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.021068 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.021078 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.112155 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:54 crc kubenswrapper[4869]: E0929 13:41:54.112357 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:54 crc kubenswrapper[4869]: E0929 13:41:54.112431 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:41:56.112407134 +0000 UTC m=+42.553051454 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.124481 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.124530 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.124545 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.124567 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.124579 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.227874 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.227925 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.227937 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.227953 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.227966 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.241555 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.241646 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:54 crc kubenswrapper[4869]: E0929 13:41:54.241770 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:41:54 crc kubenswrapper[4869]: E0929 13:41:54.241954 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.242492 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:54 crc kubenswrapper[4869]: E0929 13:41:54.242588 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.255602 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.268159 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.285938 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.304794 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.327209 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/va
r/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c8
1bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.329412 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.329448 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.329461 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.329479 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.329492 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.342765 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.359354 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.374417 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.385951 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.396569 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.411203 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.420265 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.431523 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.431567 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.431578 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.431596 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.431607 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.436923 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.448028 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.459902 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.471007 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:54Z is after 2025-08-24T17:21:41Z" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.534031 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.534081 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.534092 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.534111 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.534126 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.637135 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.637190 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.637200 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.637218 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.637230 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.740271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.740320 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.740335 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.740355 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.740371 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.843157 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.843198 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.843207 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.843228 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.843245 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.945598 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.945648 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.945665 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.945690 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:54 crc kubenswrapper[4869]: I0929 13:41:54.945702 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:54Z","lastTransitionTime":"2025-09-29T13:41:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.048055 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.048098 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.048111 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.048130 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.048143 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.151110 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.151172 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.151192 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.151210 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.151225 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.241714 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:55 crc kubenswrapper[4869]: E0929 13:41:55.241850 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.254194 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.254235 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.254245 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.254278 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.254289 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.356508 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.356555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.356574 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.356591 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.356602 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.459017 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.459079 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.459089 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.459106 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.459117 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.561130 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.561179 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.561197 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.561213 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.561224 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.663898 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.663944 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.663954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.663971 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.663981 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.766072 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.766127 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.766136 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.766156 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.766167 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.868701 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.868760 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.868771 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.868791 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.868808 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.971471 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.971524 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.971534 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.971553 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:55 crc kubenswrapper[4869]: I0929 13:41:55.971565 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:55Z","lastTransitionTime":"2025-09-29T13:41:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.074460 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.074517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.074527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.074544 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.074555 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.133341 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:56 crc kubenswrapper[4869]: E0929 13:41:56.133491 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:56 crc kubenswrapper[4869]: E0929 13:41:56.133579 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:00.133557236 +0000 UTC m=+46.574201576 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.177688 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.177744 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.177760 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.177780 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.177800 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.241758 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.241808 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.241758 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:56 crc kubenswrapper[4869]: E0929 13:41:56.241902 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:56 crc kubenswrapper[4869]: E0929 13:41:56.241997 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:56 crc kubenswrapper[4869]: E0929 13:41:56.242215 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.280073 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.280125 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.280142 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.280160 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.280173 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.382399 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.382442 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.382455 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.382474 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.382487 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.484748 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.484794 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.484804 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.484820 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.484830 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.587544 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.587599 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.587645 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.587669 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.587683 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.690832 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.690867 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.690877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.690893 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.690903 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.793348 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.793394 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.793406 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.793424 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.793434 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.895679 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.895720 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.895729 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.895743 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.895753 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.998571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.998623 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.998636 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.998652 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:56 crc kubenswrapper[4869]: I0929 13:41:56.998664 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:56Z","lastTransitionTime":"2025-09-29T13:41:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.102002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.102043 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.102056 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.102073 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.102085 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.204537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.204578 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.204588 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.204630 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.204644 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.241061 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:57 crc kubenswrapper[4869]: E0929 13:41:57.241203 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.307358 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.307413 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.307427 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.307446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.307459 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.409897 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.409957 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.409972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.409992 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.410006 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.511800 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.511847 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.511857 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.511877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.511887 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.615333 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.615380 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.615393 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.615413 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.615426 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.717725 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.717774 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.717783 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.717799 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.717812 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.820074 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.820112 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.820121 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.820135 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.820145 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.922353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.922421 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.922432 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.922446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:57 crc kubenswrapper[4869]: I0929 13:41:57.922476 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:57Z","lastTransitionTime":"2025-09-29T13:41:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.025410 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.025444 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.025454 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.025470 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.025482 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.128208 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.128258 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.128271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.128286 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.128297 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.230594 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.230671 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.230683 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.230700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.230712 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.240976 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.241007 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.241040 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:41:58 crc kubenswrapper[4869]: E0929 13:41:58.241126 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:41:58 crc kubenswrapper[4869]: E0929 13:41:58.241175 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:41:58 crc kubenswrapper[4869]: E0929 13:41:58.241384 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.333813 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.333865 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.333874 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.333892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.333902 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.436862 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.436906 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.436915 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.436933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.436945 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.539428 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.539497 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.539510 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.539527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.539547 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.641786 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.641834 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.641847 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.641863 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.641874 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.745924 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.746032 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.746069 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.746095 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.746110 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.848505 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.848615 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.848636 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.848656 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.848667 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.951005 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.951056 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.951066 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.951083 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:58 crc kubenswrapper[4869]: I0929 13:41:58.951095 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:58Z","lastTransitionTime":"2025-09-29T13:41:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.054414 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.054470 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.054484 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.054503 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.054517 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.157696 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.157742 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.157751 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.157769 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.157781 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.241581 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:41:59 crc kubenswrapper[4869]: E0929 13:41:59.241757 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.259685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.259723 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.259733 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.259750 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.259760 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.362417 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.362475 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.362486 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.362501 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.362512 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.465182 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.465234 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.465318 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.465338 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.465351 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.567327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.567368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.567379 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.567395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.567600 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.670304 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.670361 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.670377 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.670395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.670408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.773160 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.773212 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.773225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.773243 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.773258 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.876035 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.876084 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.876099 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.876119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.876131 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.979186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.979296 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.979326 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.979362 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:41:59 crc kubenswrapper[4869]: I0929 13:41:59.979384 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:41:59Z","lastTransitionTime":"2025-09-29T13:41:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.082077 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.082129 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.082138 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.082157 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.082172 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.176520 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.176762 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.176884 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:08.176856367 +0000 UTC m=+54.617500687 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.185561 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.185633 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.185643 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.185667 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.185679 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.241362 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.241421 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.241510 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.241362 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.241703 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.241789 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.288474 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.288517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.288534 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.288553 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.288565 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.391852 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.391896 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.391914 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.391934 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.391946 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.394723 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.394779 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.394789 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.394809 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.394820 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.408390 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:00Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.412984 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.413045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.413055 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.413071 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.413083 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.426494 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:00Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.431871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.431949 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.431999 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.432020 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.432033 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.448114 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:00Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.454193 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.454242 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.454254 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.454275 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.454287 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.471265 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:00Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.476118 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.476168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.476182 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.476202 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.476216 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.487712 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:00Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:00 crc kubenswrapper[4869]: E0929 13:42:00.487852 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.494819 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
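The two status-patch failures above end the same way: the kubelet retries until "update node status exceeds retry count" because every PATCH is intercepted by the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743, whose serving certificate expired on 2025-08-24T17:21:41Z. A minimal Go sketch for confirming this from the node itself follows; the address comes from the error text, while the file name and the use of InsecureSkipVerify to fetch an untrusted certificate are assumptions of this example, not anything the log prescribes.

// certprobe.go - print the validity window of the certificate served at the
// address taken from the kubelet errors above. InsecureSkipVerify is
// deliberate: the point is to inspect the expired certificate, not trust it.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Assumption: this runs on the node where 127.0.0.1:9743 is listening.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // accept the expired cert so we can read it
	})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s\n  notBefore=%s\n  notAfter=%s\n  expired=%t\n",
			cert.Subject, cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339), time.Now().After(cert.NotAfter))
	}
}

Against a healthy webhook the printed notAfter would lie in the future; here it should reproduce the 2025-08-24T17:21:41Z expiry quoted in the error.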
event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.494865 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.494875 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.494894 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.494907 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.598305 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.598365 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.598376 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.598400 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.598414 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.700956 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.701002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.701012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.701029 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.701040 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.803887 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.803954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.803968 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.803992 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.804009 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.907676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.907723 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.907734 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.907750 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:00 crc kubenswrapper[4869]: I0929 13:42:00.907762 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:00Z","lastTransitionTime":"2025-09-29T13:42:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.010163 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.010200 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.010210 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.010224 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.010234 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.113186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.113229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.113239 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.113253 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.113265 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.215957 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.216001 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.216012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.216028 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.216037 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.241861 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:01 crc kubenswrapper[4869]: E0929 13:42:01.242061 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
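Every NotReady heartbeat in this stretch carries the same root cause string: no CNI configuration file in /etc/kubernetes/cni/net.d/. A small sketch that inspects that directory directly; the .conf/.conflist/.json extension filter follows conventional CNI config-loader behaviour and is an assumption of this example, as is the file name.

// cnicheck.go - list candidate CNI network configs in the directory named in
// the kubelet errors above. The extension filter is the conventional CNI
// loader convention (an assumption here, not something the log states).
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const dir = "/etc/kubernetes/cni/net.d" // path from the kubelet error message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		return
	}
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("candidate config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration files found - matches the kubelet error")
	}
}

On OpenShift this directory is typically populated by the cluster network operator's pods once they are running; until a config appears, the Ready condition above stays False.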
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.319925 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.320145 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.320215 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.320243 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.320299 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.423480 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.423541 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.423555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.423575 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.423589 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.526720 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.526762 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.526775 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.526793 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.526804 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.629568 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.629630 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.629642 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.629662 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.629676 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.731645 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.731693 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.731712 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.731734 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.731747 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.834242 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.834290 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.834302 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.834321 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.834333 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.936634 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.936704 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.936717 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.936733 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:01 crc kubenswrapper[4869]: I0929 13:42:01.936745 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:01Z","lastTransitionTime":"2025-09-29T13:42:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.040654 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.040700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.040712 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.040733 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.040749 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.143000 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.143067 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.143081 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.143101 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.143117 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.241887 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.241992 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:02 crc kubenswrapper[4869]: E0929 13:42:02.242054 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:02 crc kubenswrapper[4869]: E0929 13:42:02.242177 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.242301 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:02 crc kubenswrapper[4869]: E0929 13:42:02.242401 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.247458 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.247538 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.247550 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.247566 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.247631 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.350111 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.350158 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.350168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.350186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.350199 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.453116 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.453165 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.453194 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.453248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.453279 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.556039 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.556091 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.556101 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.556120 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.556132 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.659457 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.659504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.659512 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.659528 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.659537 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.763563 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.763631 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.763650 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.763671 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.763715 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.866568 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.866630 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.866643 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.866660 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.866673 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.969995 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.970067 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.970081 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.970104 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:02 crc kubenswrapper[4869]: I0929 13:42:02.970121 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:02Z","lastTransitionTime":"2025-09-29T13:42:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.073603 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.073720 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.073739 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.073773 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.073793 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.176767 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.176822 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.176836 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.176856 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.176869 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.241080 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:03 crc kubenswrapper[4869]: E0929 13:42:03.241260 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
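The setters.go entries repeat one well-formed condition object. For reference, a sketch that decodes that exact payload; the struct is a hand-rolled stand-in mirroring only the fields visible in the logged JSON, not an import of the Kubernetes NodeCondition type.

// condition.go - decode the Ready condition payload that setters.go logs above.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NodeCondition mirrors the fields present in the logged JSON.
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Payload copied verbatim from the "Node became not ready" entries.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s: %s\n", c.Type, c.Status,
		c.LastTransitionTime.Format(time.RFC3339), c.Reason)
}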
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.280118 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.280159 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.280169 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.280183 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.280198 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.383700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.383753 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.383772 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.383794 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.383808 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.486939 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.486985 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.486999 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.487020 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.487032 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.589924 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.589972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.589991 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.590012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.590024 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.693224 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.693284 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.693299 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.693367 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.693387 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.795976 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.796018 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.796029 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.796046 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.796082 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.898487 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.898543 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.898557 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.898572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:03 crc kubenswrapper[4869]: I0929 13:42:03.898582 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:03Z","lastTransitionTime":"2025-09-29T13:42:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.001385 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.001439 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.001450 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.001464 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.001472 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.104635 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.104687 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.104700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.104715 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.104725 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.207630 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.207685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.207699 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.207719 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.207731 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.241444 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.241504 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.241444 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:04 crc kubenswrapper[4869]: E0929 13:42:04.241623 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
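Below, the same certificate failure recurs on the webhook's pod path (https://127.0.0.1:9743/pod) while pod status updates are patched. The x509 error text corresponds to a plain validity-window comparison; this sketch mints a throwaway self-signed certificate and reproduces the verdict. The certificate is fabricated purely for illustration; only the two timestamps are taken from the log.

// certwindow.go - reproduce the "certificate has expired or is not yet valid"
// verdict from the webhook errors, using the NotAfter and "current time"
// values quoted in the log above and below.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	notAfter := time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC) // expiry from the log
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "network-node-identity (example)"},
		NotBefore:    notAfter.AddDate(-1, 0, 0),
		NotAfter:     notAfter,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	now := time.Date(2025, 9, 29, 13, 42, 4, 0, time.UTC) // "current time" from the log
	switch {
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}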
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:04 crc kubenswrapper[4869]: E0929 13:42:04.241796 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:04 crc kubenswrapper[4869]: E0929 13:42:04.241906 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.261544 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.277403 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.292489 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.305552 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.310320 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.310379 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.310391 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.310414 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.310428 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.318131 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.335932 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.352328 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.370035 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\
\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e 
Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\"
,\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.381120 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.392911 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.406146 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.413563 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.413671 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.413689 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.413713 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.413729 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.421457 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.437087 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.450366 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.464725 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.479269 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:04Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.517523 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.517592 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.517625 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.517651 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.517667 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.620586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.620653 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.620665 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.620685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.620697 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.723654 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.723695 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.723708 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.723726 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.723738 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.826212 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.826249 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.826258 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.826273 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.826282 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.929262 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.929315 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.929327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.929347 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:04 crc kubenswrapper[4869]: I0929 13:42:04.929357 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:04Z","lastTransitionTime":"2025-09-29T13:42:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.032271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.032325 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.032381 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.032399 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.032409 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.135191 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.135398 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.135423 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.135440 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.135450 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.237728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.237779 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.237789 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.237806 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.237818 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.241113 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.241258 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.340226 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.340287 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.340301 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.340319 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.340352 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.443173 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.443217 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.443233 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.443254 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.443266 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.546233 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.546281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.546291 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.546310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.546324 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.648856 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.648902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.648914 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.648933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.648948 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.688009 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.699159 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.705298 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"nam
e\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.716920 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.734821 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.748057 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.752352 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.752428 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.752446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.752466 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.752478 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.760937 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.772088 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.784631 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.796998 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.807944 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.819692 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.833694 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.847129 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.855744 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.855795 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.855808 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.855828 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.855840 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.859732 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.871959 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.884107 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.895478 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:05Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.944055 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.944213 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.944287 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:42:37.944252643 +0000 UTC m=+84.384896963 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.944371 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.944636 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.944667 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:37.944648274 +0000 UTC m=+84.385292594 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.944751 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:42:05 crc kubenswrapper[4869]: E0929 13:42:05.944809 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:37.944802068 +0000 UTC m=+84.385446388 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.959010 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.959052 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.959062 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.959079 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:05 crc kubenswrapper[4869]: I0929 13:42:05.959091 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:05Z","lastTransitionTime":"2025-09-29T13:42:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.046090 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.046191 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046322 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046376 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046378 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046394 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046400 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046413 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046463 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:38.046442728 +0000 UTC m=+84.487087048 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.046488 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:38.046481489 +0000 UTC m=+84.487125809 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.061693 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.061774 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.061894 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.061917 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.061931 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.166293 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.166353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.166372 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.166395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.166409 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
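[Editor's note] The failing kube-api-access-* volumes above are projected volumes that bundle the service-account token with the kube-root-ca.crt and (on OpenShift) openshift-service-ca.crt ConfigMaps, which is why both objects must be registered in the kubelet's object store before SetUp can succeed. A sketch of that volume's approximate shape using the k8s.io/api types; the paths, keys, and expiry are illustrative, since the actual pod spec is not in this log.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // illustrative token lifetime
	// Approximate shape of a kube-api-access-* projected volume.
	vol := corev1.Volume{
		Name: "kube-api-access-cqllr",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: &expiry,
						Path:              "token",
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}

If any referenced ConfigMap is missing from the kubelet's store, preparation of the whole projected volume fails, producing exactly the aggregated "[object ... not registered, object ... not registered]" errors recorded above.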
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.241760 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.241779 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.241897 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.241962 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.242108 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:06 crc kubenswrapper[4869]: E0929 13:42:06.242149 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.269481 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.269525 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.269535 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.269554 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.269570 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.372945 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.373036 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.373067 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.373103 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.373128 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.476191 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.476243 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.476253 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.476272 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.476284 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.579201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.579262 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.579275 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.579294 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.579307 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.682340 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.682413 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.682426 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.682449 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.682463 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.784959 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.785002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.785012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.785027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.785037 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.887378 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.887431 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.887445 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.887466 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.887477 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.989881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.989923 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.989933 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.989946 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:06 crc kubenswrapper[4869]: I0929 13:42:06.989958 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:06Z","lastTransitionTime":"2025-09-29T13:42:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.092367 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.092415 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.092425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.092442 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.092454 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.195701 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.195775 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.195789 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.195808 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.195819 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.241114 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:07 crc kubenswrapper[4869]: E0929 13:42:07.241288 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.299420 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.299507 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.299529 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.299559 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.299582 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.403173 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.403246 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.403268 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.403306 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.403338 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.506917 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.506997 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.507038 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.507072 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.507098 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.610037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.610121 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.610149 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.610183 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.610206 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.713495 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.713552 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.713564 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.713583 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.713595 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.816517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.816548 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.816557 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.816571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.816580 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.919839 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.920248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.920356 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.920479 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:07 crc kubenswrapper[4869]: I0929 13:42:07.920573 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:07Z","lastTransitionTime":"2025-09-29T13:42:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.023881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.023932 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.023946 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.023965 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.023978 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.127208 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.127281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.127294 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.127316 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.127329 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.230992 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.231035 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.231044 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.231061 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.231072 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.241602 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.241739 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:08 crc kubenswrapper[4869]: E0929 13:42:08.241753 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
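[Editor's note] Every NodeNotReady heartbeat above repeats the same root cause: no CNI configuration file in /etc/kubernetes/cni/net.d/. The sketch below is a simplified stand-in for that readiness check, scanning the conf directory the way a CNI config loader looks for network definitions; the directory path is taken from the log, while the exact glob set is an assumption.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Report whether any CNI network configuration exists in the directory
// the kubelet complains about; the node stays NotReady until one appears.
func main() {
	dir := "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(dir, pat))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file found; network plugin not ready")
		os.Exit(1)
	}
	fmt.Println("CNI config present:", found)
}

In this log the gap closes once the ovnkube-node pod's ovnkube-controller writes its config, which is why the sandbox-creation retries for the networking pods keep being skipped until then.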
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:08 crc kubenswrapper[4869]: E0929 13:42:08.242005 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.242414 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:08 crc kubenswrapper[4869]: E0929 13:42:08.242693 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.242712 4869 scope.go:117] "RemoveContainer" containerID="cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.275189 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:08 crc kubenswrapper[4869]: E0929 13:42:08.275441 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:08 crc kubenswrapper[4869]: E0929 13:42:08.275562 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:24.275532286 +0000 UTC m=+70.716176806 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.334926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.335371 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.335382 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.335399 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.335412 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.439060 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.439116 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.439130 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.439149 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.439160 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.542434 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.542504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.542517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.542537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.542548 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.604910 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/1.log"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.620433 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5"}
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.621467 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.642212 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.645053 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.645158 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.645219 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.645300 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.645367 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
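[Editor's note] The status-patch failures here and earlier all reduce to one fact: the serving certificate behind https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, well before the node's clock (2025-09-29), so every call to the pod.network-node-identity.openshift.io webhook fails x509 verification. A minimal Go reproduction of the validity-window check that verification performs; the PEM file name is hypothetical.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// Check a certificate's NotBefore/NotAfter window against the current
// time, mirroring the "expired or is not yet valid" error above.
func main() {
	data, err := os.ReadFile("webhook-cert.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	fmt.Println("NotBefore:", cert.NotBefore, "NotAfter:", cert.NotAfter)
	switch {
	case now.After(cert.NotAfter):
		fmt.Println("certificate has expired")
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}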
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.656755 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.684483 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.701063 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.719800 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.736601 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.748256 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.748299 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.748311 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.748327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.748339 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.753921 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:
41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.766335 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.778279 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.793852 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.813462 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.828142 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.843905 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.850519 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.850567 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.850580 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.850600 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.850631 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.859148 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.875390 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.887988 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.900571 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:08Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.953229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.953284 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.953296 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.953317 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:08 crc kubenswrapper[4869]: I0929 13:42:08.953327 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:08Z","lastTransitionTime":"2025-09-29T13:42:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.056489 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.056566 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.056580 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.056598 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.056613 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.209270 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.209318 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.209330 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.209352 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.209362 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.240930 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:09 crc kubenswrapper[4869]: E0929 13:42:09.241078 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.311789 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.311845 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.311858 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.311877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.311889 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.417495 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.417541 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.417551 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.417572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.417586 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.520791 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.520846 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.520856 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.520872 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.520881 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.623238 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.623281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.623292 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.623308 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.623329 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.626437 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/2.log" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.627138 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/1.log" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.630807 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5" exitCode=1 Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.630869 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.630927 4869 scope.go:117] "RemoveContainer" containerID="cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.631800 4869 scope.go:117] "RemoveContainer" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5" Sep 29 13:42:09 crc kubenswrapper[4869]: E0929 13:42:09.632072 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.647645 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.662642 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.678385 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.691014 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.707652 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.731960 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.731996 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.732011 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.732031 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.732044 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.744613 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.766888 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.789907 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.803436 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.818188 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.827964 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.834726 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.834764 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.834790 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.834810 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.834825 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.843413 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.860020 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.876938 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09
-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.895049 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-s
cript-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb2233cd2adb89390ac0964ff83dc6d7159dbdf136ee276f4167bd9246f28f59\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"message\\\":\\\"twork policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:41:50Z is after 2025-08-24T17:21:41Z]\\\\nI0929 13:41:50.488209 6307 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: 
Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0929 \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.907388 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.920061 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:09Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.937354 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.937420 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.937431 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.937449 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:09 crc kubenswrapper[4869]: I0929 13:42:09.937461 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:09Z","lastTransitionTime":"2025-09-29T13:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.040572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.040624 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.040635 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.040651 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.040662 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.143187 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.143247 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.143257 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.143272 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.143282 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.241732 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.241908 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.242092 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.242388 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.241756 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.242889 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.245796 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.246013 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.246119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.246221 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.246304 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.349370 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.349415 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.349428 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.349448 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.349460 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.452726 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.452794 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.452808 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.452833 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.452858 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.556119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.556152 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.556162 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.556180 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.556190 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.637130 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/2.log"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.642166 4869 scope.go:117] "RemoveContainer" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5"
Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.642346 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.656415 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.659357 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.659394 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.659407 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.659425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.659437 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.666442 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.666472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.666484 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.666499 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.666509 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.673541 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.681166 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.685926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.685974 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.685985 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.686009 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.686022 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.691859 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe9685468
5ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.698986 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.704772 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.704815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.704825 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.704846 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.704858 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.706546 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.719072 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.724277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.724340 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.724354 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.724407 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.724422 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.730204 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5
ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.740746 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.747779 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.748131 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.748193 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.748225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.748249 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.748268 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.764639 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.764887 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: E0929 13:42:10.765057 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.767344 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.767384 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.767397 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.767418 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.767430 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.779746 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.794126 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.810278 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.827018 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.839037 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.854565 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.870738 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.870800 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.870813 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.870840 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.870853 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.871331 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.886465 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.902283 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.913190 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:10Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.974133 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.974533 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.974716 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.974837 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:10 crc kubenswrapper[4869]: I0929 13:42:10.974943 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:10Z","lastTransitionTime":"2025-09-29T13:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.078745 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.078790 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.078802 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.078819 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.078831 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.182091 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.182904 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.182947 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.182969 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.182981 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.241191 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:11 crc kubenswrapper[4869]: E0929 13:42:11.241381 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.286201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.286251 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.286267 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.286292 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.286306 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.390302 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.390384 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.390395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.390419 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.390429 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.494420 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.495285 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.495315 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.495345 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.495369 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.598663 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.598735 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.598750 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.598774 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.598794 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.702539 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.702694 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.702723 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.702758 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.702781 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.806047 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.806102 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.806113 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.806132 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.806142 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.909213 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.909297 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.909330 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.909362 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:11 crc kubenswrapper[4869]: I0929 13:42:11.909383 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:11Z","lastTransitionTime":"2025-09-29T13:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.012170 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.012236 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.012252 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.012283 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.012301 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.115803 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.115848 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.115860 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.115875 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.115884 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.219168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.219236 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.219250 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.219272 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.219285 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.240866 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.240908 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.240954 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:12 crc kubenswrapper[4869]: E0929 13:42:12.241023 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:12 crc kubenswrapper[4869]: E0929 13:42:12.241117 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:42:12 crc kubenswrapper[4869]: E0929 13:42:12.241361 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.322283 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.322353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.322368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.322392 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.322408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.425818 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.425886 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.425901 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.425924 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.425939 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.528815 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.529437 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.533298 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.533373 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.533403 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.637227 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.637317 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.637375 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.637417 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.637459 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.740851 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.740917 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.740929 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.740954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.740969 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.844985 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.845051 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.845075 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.845103 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.845121 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.948562 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.948677 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.948694 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.948715 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:12 crc kubenswrapper[4869]: I0929 13:42:12.948728 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:12Z","lastTransitionTime":"2025-09-29T13:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.052555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.052691 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.052719 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.052754 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.052782 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.157451 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.158114 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.158168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.158204 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.158235 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.241955 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:13 crc kubenswrapper[4869]: E0929 13:42:13.242190 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.261586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.261717 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.261778 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.261807 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.261826 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.375778 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.375822 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.375835 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.375853 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.375865 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.479150 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.479190 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.479201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.479217 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.479227 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.582688 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.582764 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.582785 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.582814 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.582844 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.686399 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.686436 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.686446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.686460 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.686470 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.790102 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.790167 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.790201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.790234 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.790256 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.894225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.894302 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.894327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.894361 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.894401 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.998343 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.998404 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.998416 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.998438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:13 crc kubenswrapper[4869]: I0929 13:42:13.998451 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:13Z","lastTransitionTime":"2025-09-29T13:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.102041 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.102140 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.102160 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.102190 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.102210 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.206303 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.206392 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.206415 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.206457 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.206484 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.241892 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.241891 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.241974 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:14 crc kubenswrapper[4869]: E0929 13:42:14.242152 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
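The NetworkReady=false condition repeated above reduces to one filesystem fact: nothing has written a CNI config into /etc/kubernetes/cni/net.d/ yet. A minimal sketch of that readiness test, illustrative only and not the kubelet's or CRI-O's actual code (the accepted file extensions are an assumption):

```go
// Illustrative sketch of the "is a CNI config present yet?" check behind the
// NetworkReady=false condition in the records above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const cniConfDir = "/etc/kubernetes/cni/net.d" // path taken from the log lines
	entries, err := os.ReadDir(cniConfDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: cannot read %s: %v\n", cniConfDir, err)
		return
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // assumed set of recognized extensions
			fmt.Printf("NetworkReady=true: found CNI config %s\n", e.Name())
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file in " + cniConfDir)
}
```

Once the network plugin drops a config file into that directory, the runtime can report NetworkReady=true and the sandbox-less pods above become syncable again.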
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:14 crc kubenswrapper[4869]: E0929 13:42:14.242258 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:14 crc kubenswrapper[4869]: E0929 13:42:14.242334 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.264216 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.281799 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.297344 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.313533 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.313649 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.313687 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.313718 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.313739 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.314341 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.336471 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.352722 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.370941 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.384706 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.399544 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.412266 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.416398 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.416442 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.416452 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.416468 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.416478 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.427109 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.444233 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.456755 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.476034 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.489332 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.503833 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.520006 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.520040 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.520051 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.520066 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.520075 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.523508 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:14Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.623477 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.623685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.623714 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.623750 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.623774 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.726938 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.727002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.727026 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.727056 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.727079 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.830660 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.831242 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.831389 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.831528 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.831786 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.935585 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.935663 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.935675 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.935696 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:14 crc kubenswrapper[4869]: I0929 13:42:14.935707 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:14Z","lastTransitionTime":"2025-09-29T13:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.038814 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.039223 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.039261 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.039298 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.039379 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.142370 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.142440 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.142456 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.142485 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.142505 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.240992 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:15 crc kubenswrapper[4869]: E0929 13:42:15.241151 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.244875 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.244943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.244956 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.244987 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.245003 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.348434 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.348500 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.348511 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.348527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.348536 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.451949 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.451997 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.452010 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.452029 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.452041 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.555449 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.555520 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.555539 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.555572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.555592 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.658690 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.658764 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.658777 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.658791 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.658801 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.762555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.762644 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.762656 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.762676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.762688 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.866036 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.866102 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.866168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.866206 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.866229 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.969546 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.969673 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.969705 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.969735 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:15 crc kubenswrapper[4869]: I0929 13:42:15.969755 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:15Z","lastTransitionTime":"2025-09-29T13:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.073003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.073163 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.073300 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.073327 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.073340 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:16Z","lastTransitionTime":"2025-09-29T13:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.176095 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.176171 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.176186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.176204 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.176218 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:16Z","lastTransitionTime":"2025-09-29T13:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.241180 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.241247 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:16 crc kubenswrapper[4869]: I0929 13:42:16.241386 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:16 crc kubenswrapper[4869]: E0929 13:42:16.241529 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:16 crc kubenswrapper[4869]: E0929 13:42:16.241903 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:16 crc kubenswrapper[4869]: E0929 13:42:16.241811 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... node-status cycle repeats at ~100 ms intervals from 13:42:16.281 through 13:42:17.205; duplicate entries omitted ...]
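The recurring message in these entries is the kubelet's network-readiness gate: the container runtime keeps reporting NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/, and until then pod sandboxes cannot be created. A minimal sketch of that existence check, written in Go purely for illustration (the directory path comes from the log message; the function, glob patterns, and output text are assumptions, not kubelet source):

// cnicheck.go - illustrative sketch only, not kubelet source.
// Reproduces the gist of the readiness condition logged above:
// "is there any CNI config file in /etc/kubernetes/cni/net.d/?"
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

const cniConfDir = "/etc/kubernetes/cni/net.d" // directory named in the log message

func networkReady(dir string) (bool, error) {
	// CNI config files conventionally end in .conf, .conflist or .json.
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(dir, pat))
		if err != nil {
			return false, err
		}
		if len(matches) > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := networkReady(cniConfDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if !ready {
		// Mirrors the recurring kubelet message in the surrounding log.
		fmt.Println("network not ready: no CNI configuration file in", cniConfDir)
		return
	}
	fmt.Println("network ready")
}

On this node the check keeps failing because the network operator has not yet written any config into that directory, which is why the same cycle repeats until the CNI provider starts.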
Sep 29 13:42:17 crc kubenswrapper[4869]: I0929 13:42:17.241466 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:17 crc kubenswrapper[4869]: E0929 13:42:17.241644 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... node-status cycle repeats at ~100 ms intervals from 13:42:17.309 through 13:42:18.238; duplicate entries omitted ...]
Sep 29 13:42:18 crc kubenswrapper[4869]: I0929 13:42:18.241723 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:18 crc kubenswrapper[4869]: I0929 13:42:18.241826 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:18 crc kubenswrapper[4869]: E0929 13:42:18.241847 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:18 crc kubenswrapper[4869]: E0929 13:42:18.241963 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:18 crc kubenswrapper[4869]: I0929 13:42:18.241823 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:18 crc kubenswrapper[4869]: E0929 13:42:18.242098 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... node-status cycle repeats at ~100 ms intervals from 13:42:18.341 through 13:42:19.172; duplicate entries omitted ...]
Sep 29 13:42:19 crc kubenswrapper[4869]: I0929 13:42:19.241447 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:19 crc kubenswrapper[4869]: E0929 13:42:19.241766 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... node-status cycle repeats at ~100 ms intervals from 13:42:19.275 through 13:42:20.204; duplicate entries omitted ...]
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.241477 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.241573 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:20 crc kubenswrapper[4869]: E0929 13:42:20.241676 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.241583 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:20 crc kubenswrapper[4869]: E0929 13:42:20.241809 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:20 crc kubenswrapper[4869]: E0929 13:42:20.241891 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... node-status cycle repeats at ~100 ms intervals from 13:42:20.306 through 13:42:20.923, with one additional cycle at 13:42:20.957; duplicate entries omitted ...]
Sep 29 13:42:20 crc kubenswrapper[4869]: E0929 13:42:20.976163 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:20Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.981368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.981404 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.981415 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.981432 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.981447 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:20Z","lastTransitionTime":"2025-09-29T13:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:20 crc kubenswrapper[4869]: E0929 13:42:20.994378 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:21Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.998046 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.998082 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.998097 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.998116 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:20 crc kubenswrapper[4869]: I0929 13:42:20.998132 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:20Z","lastTransitionTime":"2025-09-29T13:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:21 crc kubenswrapper[4869]: E0929 13:42:21.011507 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:21Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.015990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.016027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.016038 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.016061 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.016072 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:21 crc kubenswrapper[4869]: E0929 13:42:21.028427 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:21Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.032438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.032478 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.032489 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.032506 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.032521 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:21 crc kubenswrapper[4869]: E0929 13:42:21.044423 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:21Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:21 crc kubenswrapper[4869]: E0929 13:42:21.044599 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.046335 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.046389 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.046402 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.046425 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.046471 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.149483 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.149526 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.149537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.149552 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.149563 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.241207 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:21 crc kubenswrapper[4869]: E0929 13:42:21.241365 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.252254 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.252320 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.252337 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.252359 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.252374 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.355205 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.355271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.355288 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.355312 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.355327 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.457404 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.457462 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.457477 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.457498 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.457509 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.560623 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.560681 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.560694 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.560721 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.560758 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.663315 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.663361 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.663370 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.663389 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.663400 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.765707 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.765763 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.765780 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.765801 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.765814 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.868330 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.868369 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.868380 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.868397 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.868408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.971926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.971989 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.972005 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.972031 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:21 crc kubenswrapper[4869]: I0929 13:42:21.972048 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:21Z","lastTransitionTime":"2025-09-29T13:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.074649 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.074714 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.074728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.074749 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.074763 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.177837 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.177885 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.177895 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.177913 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.177924 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.241662 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:22 crc kubenswrapper[4869]: E0929 13:42:22.241821 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.242071 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:22 crc kubenswrapper[4869]: E0929 13:42:22.242152 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.242465 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:22 crc kubenswrapper[4869]: E0929 13:42:22.242527 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.280001 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.280068 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.280079 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.280096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.280106 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.382465 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.382511 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.382521 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.382537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.382550 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.484860 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.484937 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.484953 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.484970 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.484981 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.587312 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.587353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.587365 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.587384 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.587396 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.689633 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.689690 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.689706 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.689725 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.689738 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.792487 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.792537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.792548 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.792567 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.792578 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.894955 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.894998 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.895008 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.895026 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.895038 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.998161 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.998218 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.998229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.998254 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:22 crc kubenswrapper[4869]: I0929 13:42:22.998267 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:22Z","lastTransitionTime":"2025-09-29T13:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.102823 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.102915 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.102932 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.102954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.102967 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.205659 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.205730 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.205743 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.205761 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.205773 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.240852 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:23 crc kubenswrapper[4869]: E0929 13:42:23.241059 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.252104 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.309125 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.309176 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.309187 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.309205 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.309215 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.412157 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.412195 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.412206 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.412225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.412235 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.515314 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.515368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.515378 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.515397 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.515410 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.618902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.618958 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.618979 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.619008 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.619023 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.722419 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.722464 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.722474 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.722491 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.722504 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.825179 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.825244 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.825256 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.825277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.825296 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.928066 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.928129 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.928143 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.928179 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:23 crc kubenswrapper[4869]: I0929 13:42:23.928190 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:23Z","lastTransitionTime":"2025-09-29T13:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.031219 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.031737 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.031823 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.031903 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.031978 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.134879 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.135275 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.135518 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.135604 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.135699 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.239172 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.239456 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.239560 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.239687 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.239789 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.241493 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.241593 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.241655 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:24 crc kubenswrapper[4869]: E0929 13:42:24.241760 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:24 crc kubenswrapper[4869]: E0929 13:42:24.241946 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:24 crc kubenswrapper[4869]: E0929 13:42:24.242032 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.256506 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:4
1:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.273263 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.288775 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.302423 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.316504 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.328422 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.342511 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.342587 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.342603 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.342641 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.342663 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.345486 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: E0929 13:42:24.346321 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:24 crc kubenswrapper[4869]: E0929 13:42:24.346402 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:42:56.346377103 +0000 UTC m=+102.787021423 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.346107 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.359024 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.371564 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.382930 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.396704 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.416509 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.428050 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\
",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.443598 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.447380 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.447426 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.447446 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.447467 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.447483 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.462606 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.482201 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.497834 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09
-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.519694 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-s
cript-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"nam
e\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:24Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.549788 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.549835 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.549844 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.549859 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.549869 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.653422 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.653469 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.653484 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.653503 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.653517 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.756802 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.756857 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.756871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.756891 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.756904 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.860458 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.860518 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.860531 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.860550 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.860563 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.963580 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.963672 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.963691 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.963717 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:24 crc kubenswrapper[4869]: I0929 13:42:24.963736 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:24Z","lastTransitionTime":"2025-09-29T13:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.066313 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.066364 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.066376 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.066396 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.066408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.169640 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.169701 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.169715 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.169734 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.169745 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.241419 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:25 crc kubenswrapper[4869]: E0929 13:42:25.241640 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.242510 4869 scope.go:117] "RemoveContainer" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5" Sep 29 13:42:25 crc kubenswrapper[4869]: E0929 13:42:25.242753 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.273180 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.273238 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.273288 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.273315 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.273338 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.376532 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.376593 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.376628 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.376653 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.376669 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.479582 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.479675 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.479685 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.479700 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.479710 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.584340 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.584418 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.584438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.584467 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.584488 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.687745 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.687786 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.687800 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.687817 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.687829 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.790948 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.790983 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.790995 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.791012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.791024 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.894223 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.894561 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.894571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.894586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.894595 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.997258 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.997306 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.997317 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.997333 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:25 crc kubenswrapper[4869]: I0929 13:42:25.997345 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:25Z","lastTransitionTime":"2025-09-29T13:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.100957 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.101045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.101059 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.101080 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.101095 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.203994 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.204050 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.204063 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.204083 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.204096 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.241896 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.241955 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.241966 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:26 crc kubenswrapper[4869]: E0929 13:42:26.242098 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:26 crc kubenswrapper[4869]: E0929 13:42:26.242232 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:26 crc kubenswrapper[4869]: E0929 13:42:26.242340 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.306887 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.306947 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.306960 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.306980 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.306996 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.409452 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.409504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.409516 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.409531 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.409542 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.512266 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.512339 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.512351 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.512370 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.512383 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.614645 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.614690 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.614704 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.614724 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.614736 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.694220 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/0.log" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.694274 4869 generic.go:334] "Generic (PLEG): container finished" podID="0e924d34-8790-41e8-a11a-91a1d0c625ca" containerID="599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6" exitCode=1 Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.694308 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerDied","Data":"599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.694685 4869 scope.go:117] "RemoveContainer" containerID="599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.713632 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5
ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.720137 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.720188 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.720199 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.720216 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.720233 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.727071 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.741824 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.754513 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.770131 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.780564 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.795538 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.810545 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.822672 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.822910 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.822968 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.822978 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.822998 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.823009 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.833808 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.846677 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.860329 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.874091 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.886165 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.896519 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.906884 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.919914 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.925766 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.925806 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.925817 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.925835 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.925846 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:26Z","lastTransitionTime":"2025-09-29T13:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:26 crc kubenswrapper[4869]: I0929 13:42:26.933796 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:26Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.071064 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.071119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.071137 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.071155 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.071170 4869 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.174087 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.174145 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.174161 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.174184 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.174198 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.241000 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:27 crc kubenswrapper[4869]: E0929 13:42:27.241237 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.277502 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.277573 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.277587 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.277606 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.277636 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.379893 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.379941 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.379954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.379971 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.379982 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.482833 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.482891 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.482906 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.482926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.482941 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.586062 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.586123 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.586135 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.586152 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.586165 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.688868 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.688934 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.688945 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.688962 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.688975 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.700576 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/0.log" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.700729 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerStarted","Data":"c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.722081 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.739658 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.755912 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.771685 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.793487 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.793594 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.793626 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.793646 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.793661 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.798003 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.810056 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.822643 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.839870 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] 
multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.855666 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.868883 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.888536 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.896103 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.896140 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.896163 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.896181 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.896192 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.903733 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.923412 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.940538 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.961096 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.981560 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.998517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.998819 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.998920 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.999037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.999133 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:27Z","lastTransitionTime":"2025-09-29T13:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:27 crc kubenswrapper[4869]: I0929 13:42:27.998683 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:27Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.012750 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:28Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.102061 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.102095 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.102105 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.102122 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.102134 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.205501 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.205575 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.205601 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.205676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.205697 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.241027 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:28 crc kubenswrapper[4869]: E0929 13:42:28.241486 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.241824 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:28 crc kubenswrapper[4869]: E0929 13:42:28.242068 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.241039 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:28 crc kubenswrapper[4869]: E0929 13:42:28.242679 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.310170 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.310230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.310248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.310276 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.310294 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.412670 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.412906 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.412982 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.413101 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.413173 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.515512 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.515871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.515937 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.515998 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.516064 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.618480 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.618791 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.618924 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.619024 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.619089 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.722871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.722926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.722946 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.722975 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.722993 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.826500 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.826586 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.826653 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.826683 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.826703 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.930520 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.930965 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.931077 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.931225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:28 crc kubenswrapper[4869]: I0929 13:42:28.931344 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:28Z","lastTransitionTime":"2025-09-29T13:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.041592 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.041695 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.041722 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.041755 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.041779 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.146083 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.146159 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.146170 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.146189 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.146200 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.241369 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:29 crc kubenswrapper[4869]: E0929 13:42:29.242081 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.249225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.249290 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.249310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.249338 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.249357 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.351856 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.351908 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.351923 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.351942 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.351958 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.455794 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.456085 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.456153 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.456271 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.456386 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.559124 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.559177 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.559190 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.559208 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.559221 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.662187 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.662246 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.662259 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.662281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.662293 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.764499 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.764542 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.764552 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.764569 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.764580 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.867061 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.867119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.867131 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.867151 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.867166 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.969810 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.969884 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.969907 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.969941 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:29 crc kubenswrapper[4869]: I0929 13:42:29.969963 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:29Z","lastTransitionTime":"2025-09-29T13:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.072675 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.072725 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.072739 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.072760 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.072776 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.175816 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.175866 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.175876 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.175892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.175906 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.241075 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.241200 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:30 crc kubenswrapper[4869]: E0929 13:42:30.241340 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:30 crc kubenswrapper[4869]: E0929 13:42:30.241467 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.241487 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:30 crc kubenswrapper[4869]: E0929 13:42:30.241795 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.278392 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.278454 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.278466 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.278487 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.278500 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.381595 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.381664 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.381676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.381691 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.381703 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.484621 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.484677 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.484692 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.484713 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.484726 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.587539 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.587576 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.587589 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.587673 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.587695 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.690934 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.691009 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.691026 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.691049 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.691063 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.793884 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.793931 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.793942 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.793959 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.793970 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.896487 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.896820 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.896921 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.897034 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.897127 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.999641 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.999675 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:30 crc kubenswrapper[4869]: I0929 13:42:30.999684 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:30.999699 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:30.999712 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:30Z","lastTransitionTime":"2025-09-29T13:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.101970 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.102007 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.102018 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.102041 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.102056 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.149495 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.149551 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.149565 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.149587 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.149629 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:31 crc kubenswrapper[4869]: E0929 13:42:31.161120 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:31Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.164602 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.164665 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.164678 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.164695 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.164705 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.178594 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.178662 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
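Every node-status patch in this stretch of the log is rejected for the same reason: the serving certificate of the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-09-29T13:42:31Z. Below is a minimal standalone Go sketch of the NotBefore/NotAfter window check that Go's crypto/x509 performs during TLS verification; the certificate path is a hypothetical placeholder, not taken from this log.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path to the webhook's serving certificate; adjust as needed.
	pemBytes, err := os.ReadFile("/tmp/webhook-serving-cert.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// crypto/x509 rejects a chain whenever the current time falls outside
	// [NotBefore, NotAfter]; that comparison is what surfaces in the log as
	// "x509: certificate has expired or is not yet valid".
	if now := time.Now(); now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate invalid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		os.Exit(1)
	}
	fmt.Println("certificate is within its validity window")
}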
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.178674 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.178691 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.178702 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.193203 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.193240 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.193253 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.193273 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.193287 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.207002 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.207041 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.207051 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.207065 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.207077 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: E0929 13:42:31.217186 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.218849 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
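The entry at 13:42:31.217186 closes out one full retry cycle: the kubelet attempts the status update a bounded number of times (nodeStatusUpdateRetry = 5 in the upstream kubelet) before giving up with "Unable to update node status". The following is a schematic Go sketch of that loop, with tryUpdateNodeStatus as a stand-in that fails the way the webhook call fails above, not the kubelet's actual implementation.

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant; everything else
// in this file is a schematic stand-in.
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus simulates one PATCH attempt; in the log above every
// attempt fails with the same expired-certificate webhook error.
func tryUpdateNodeStatus(attempt int) error {
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}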
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.218878 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.218887 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.218905 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.218916 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.241547 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:31 crc kubenswrapper[4869]: E0929 13:42:31.241731 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.321150 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.321186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.321195 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.321210 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.321222 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
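The NotReady condition itself comes from the runtime network check: NetworkReady stays false because /etc/kubernetes/cni/net.d/ holds no CNI configuration. Here is a simplified Go sketch of that directory scan; the real kubelet delegates to libcni, whose extension handling and content validation are richer than shown.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // the directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot read %s: %v\n", confDir, err)
		os.Exit(1)
	}
	var confs []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// libcni accepts these extensions; the real loader also parses and
		// validates the file contents, which this sketch skips.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		// This is the condition the kubelet keeps reporting above: no config
		// means NetworkReady=false and the node stays NotReady.
		fmt.Println("no CNI configuration file found; network plugin not ready")
		os.Exit(1)
	}
	fmt.Printf("found CNI config(s): %v\n", confs)
}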
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.424249    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.424285    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.424295    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.424310    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.424321    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.527077    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.527115    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.527124    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.527140    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.527150    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.629365    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.629702    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.629781    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.629848    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.629911    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
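Every KubeletNotReady transition above carries the same root cause: kubelet finds no CNI configuration under /etc/kubernetes/cni/net.d/, so the runtime network stays NetworkReady=false and pod sandboxes cannot get networking. A small Go sketch of that directory check (a simplification under stated assumptions: real CNI config loading also parses and validates file contents, and the extension filter here only approximates it):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Directory taken from the kubelet message above; plain Kubernetes
	// nodes more commonly use /etc/cni/net.d.
	dir := "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("reading %s: %v", dir, err)
	}

	found := 0
	for _, e := range entries {
		// CNI config loaders consider .conf, .conflist and .json files;
		// file contents are ignored entirely in this sketch.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("candidate CNI config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI config present; kubelet stays NetworkReady=false")
	}
}

On this node the directory is empty because the network plugin has not written its config yet, which is consistent with the expired webhook certificate noted earlier keeping the network stack from starting.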
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.732290    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.732336    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.732351    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.732370    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.732383    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.834811    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.834855    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.834867    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.834881    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.834892    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.937505    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.937836    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.937964    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.938109    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:31 crc kubenswrapper[4869]: I0929 13:42:31.938211    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:31Z","lastTransitionTime":"2025-09-29T13:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.042197    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.042513    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.042630    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.042735    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.042866    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.146081    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.146343    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.146544    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.146920    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.146977    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.241854    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.242006    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.242058    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:32 crc kubenswrapper[4869]: E0929 13:42:32.242426    4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
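The util.go entries above show kubelet concluding that no sandbox exists for three pods and scheduling new ones, which then fail at network setup for the same CNI reason. The same sandbox inventory can be read straight from CRI-O; a hedged Go sketch using the CRI API (assumptions: CRI-O's default socket path on this kind of node, and a grpc-go release that provides grpc.NewClient; crictl pods reports the same data):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// CRI-O's default endpoint; adjust if the node is configured differently.
	conn, err := grpc.NewClient("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("connect to CRI socket: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := runtimeapi.NewRuntimeServiceClient(conn).
		ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{})
	if err != nil {
		log.Fatalf("ListPodSandbox: %v", err)
	}
	for _, s := range resp.Items {
		// The pods named in the "No sandbox for pod can be found" entries
		// above would be missing from this listing.
		fmt.Printf("%s/%s state=%s\n", s.Metadata.Namespace, s.Metadata.Name, s.State)
	}
}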
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:32 crc kubenswrapper[4869]: E0929 13:42:32.242378 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:32 crc kubenswrapper[4869]: E0929 13:42:32.242634 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.250150 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.250201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.250211 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.250230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.250242 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.353385 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.354097 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.354238 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.354406 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.354553 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.459162 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.459247 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.459258 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.459303 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.459314 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.562825 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.562897 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.562918 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.562951 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.562975 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.665386 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.665438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.665452 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.665472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.665489 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.768452 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.768536 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.768560 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.768589 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.768642 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.871733 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.871809 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.871829 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.871889 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.871908 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.975255 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.975334 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.975353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.975380 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:32 crc kubenswrapper[4869]: I0929 13:42:32.975399 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:32Z","lastTransitionTime":"2025-09-29T13:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.078324    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.078371    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.078383    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.078429    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.078442    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.181770    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.182139    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.182221    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.182299    4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.182364    4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.240847    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:33 crc kubenswrapper[4869]: E0929 13:42:33.241497    4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
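The condition={...} object that setters.go logs on every transition is a serialized NodeCondition. A minimal Go sketch that decodes one of the payloads above into a local struct, mirroring just the fields visible in the log instead of importing k8s.io/api/core/v1:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// nodeCondition covers only the fields present in the logged payload;
// the canonical type is v1.NodeCondition in k8s.io/api.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from one of the setters.go:603 entries above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatalf("unmarshal condition: %v", err)
	}
	fmt.Printf("%s=%s since %s (reason %s): %s\n",
		c.Type, c.Status, c.LastTransitionTime, c.Reason, c.Message)
}

The status stays False here because each heartbeat observes the same NetworkPluginNotReady condition; the node flips back to Ready only once a CNI config appears and the runtime reports NetworkReady=true.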
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.286832 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.286902 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.286917 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.287107 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.287137 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.391403 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.391474 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.391496 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.391524 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.391544 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.494461 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.494504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.494514 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.494531 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.494542 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.597186 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.597223 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.597234 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.597249 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.597264 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.701125 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.701198 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.701217 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.701241 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.701257 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.804566 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.804660 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.804677 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.804698 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.804711 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.907319 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.908222 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.908346 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.908447 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:33 crc kubenswrapper[4869]: I0929 13:42:33.908521 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:33Z","lastTransitionTime":"2025-09-29T13:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.012818 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.012871 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.012881 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.012898 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.012909 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.114811 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.114919 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.114929 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.114961 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.115006 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.218127 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.218593 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.218638 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.218661 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.218675 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.241760 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.241790 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:34 crc kubenswrapper[4869]: E0929 13:42:34.241979 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.242186 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:34 crc kubenswrapper[4869]: E0929 13:42:34.242338 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:34 crc kubenswrapper[4869]: E0929 13:42:34.242540 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.257202 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.269290 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.280658 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.295650 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.309485 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.321095 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.322467 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.322508 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.322519 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.322537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.322548 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.334749 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\
\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.351088 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.366288 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.382230 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.395799 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.415147 4869 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.426083 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.426390 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.426510 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.426653 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.429876 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.431280 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.449069 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.463437 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.477600 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.492030 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.503796 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:34Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.534087 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.534149 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.534162 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.534181 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.534192 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.636577 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.636690 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.636755 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.636789 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.636809 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.741542 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.741626 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.741641 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.741662 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.741673 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.845141 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.845215 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.845239 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.845272 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.845294 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.948808 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.948927 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.948943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.948962 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:34 crc kubenswrapper[4869]: I0929 13:42:34.948974 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:34Z","lastTransitionTime":"2025-09-29T13:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.051807 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.051959 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.051984 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.052020 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.052044 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.155852 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.155929 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.155943 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.155966 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.155981 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.241827 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:35 crc kubenswrapper[4869]: E0929 13:42:35.242026 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.259381 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.259438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.259460 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.259486 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.259506 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.363158 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.363222 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.363248 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.363281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.363307 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.466164 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.466225 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.466239 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.466262 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.466275 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.569823 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.569892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.569912 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.569941 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.569960 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.673390 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.673467 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.673484 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.673505 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.673519 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.776229 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.776265 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.776276 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.776291 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.776301 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.879040 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.879082 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.879092 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.879117 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.879129 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.987557 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.987634 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.987647 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.987663 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:35 crc kubenswrapper[4869]: I0929 13:42:35.987674 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:35Z","lastTransitionTime":"2025-09-29T13:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.090433 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.090715 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.090751 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.090772 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.090785 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.193350 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.193666 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.193757 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.193876 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.193973 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.241648 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.241705 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.241647 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:36 crc kubenswrapper[4869]: E0929 13:42:36.241808 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:36 crc kubenswrapper[4869]: E0929 13:42:36.241890 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:36 crc kubenswrapper[4869]: E0929 13:42:36.242097 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.297660 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.297928 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.297995 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.298069 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.298146 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.400538 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.400596 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.400635 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.400656 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.400666 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.503753 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.503798 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.503813 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.503831 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.503841 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.606840 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.606880 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.606890 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.606916 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.606926 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.709196 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.709463 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.709563 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.709674 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.709773 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.812856 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.812910 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.812942 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.812967 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.812983 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.915112 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.915183 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.915197 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.915249 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:36 crc kubenswrapper[4869]: I0929 13:42:36.915261 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:36Z","lastTransitionTime":"2025-09-29T13:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.018223 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.018277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.018288 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.018305 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.018318 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.121469 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.121774 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.121839 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.121911 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.121974 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.224443 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.224504 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.224517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.224537 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.224552 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.242026 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:37 crc kubenswrapper[4869]: E0929 13:42:37.242182 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.243008 4869 scope.go:117] "RemoveContainer" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.328150 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.328227 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.328240 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.328268 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.328279 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.430673 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.430717 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.430727 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.430743 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.430757 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.532838 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.532964 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.532974 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.532988 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.532999 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.636074 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.636115 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.636126 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.636140 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.636151 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.737421 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/2.log" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.739519 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.740040 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.741048 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.741091 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.741108 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.741125 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.741136 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.753115 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.768406 4869 
status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.784830 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.799150 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.814666 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.824864 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.837151 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.844997 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.845046 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.845060 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.845087 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.845100 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.856208 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.867462 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.884690 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.895641 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.906891 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.918162 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.931098 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.946537 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.948206 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.948250 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.948260 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.948277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.948287 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:37Z","lastTransitionTime":"2025-09-29T13:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.957249 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.969537 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:37 crc kubenswrapper[4869]: I0929 13:42:37.983854 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:37Z is after 2025-08-24T17:21:41Z"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.001319 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.001531 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.001572 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.001703 4869 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.001784 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.001766323 +0000 UTC m=+148.442410643 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.001856 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.001822274 +0000 UTC m=+148.442466594 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.001850 4869 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.001956 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.001947137 +0000 UTC m=+148.442591457 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.051071 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.051123 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.051137 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.051155 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.051166 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.102869 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.102931 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103094 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103114 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103127 4869 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103142 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103180 4869 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103192 4869 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103199 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.10317721 +0000 UTC m=+148.543821530 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.103256 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.103237252 +0000 UTC m=+148.543881572 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.152926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.152969 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.152978 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.152995 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.153004 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.241746 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.241763 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.241760 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.241891 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.242019 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:38 crc kubenswrapper[4869]: E0929 13:42:38.242157 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.254827 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.254872 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.254882 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.254899 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.254909 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.357464 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.357520 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.357533 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.357554 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.357569 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.459832 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.459868 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.459877 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.459891 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.459901 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.562775 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.562822 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.562837 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.562860 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.562877 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.665015 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.665089 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.665109 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.665135 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.665153 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.768493 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.768554 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.768572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.768595 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.768637 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.871957 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.872050 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.872079 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.872119 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.872153 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.975702 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.975769 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.975792 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.975822 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:38 crc kubenswrapper[4869]: I0929 13:42:38.975842 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:38Z","lastTransitionTime":"2025-09-29T13:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.078960 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.079027 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.079043 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.079066 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.079081 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.181924 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.181977 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.181990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.182009 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.182021 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.241408 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:39 crc kubenswrapper[4869]: E0929 13:42:39.241645 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.285328 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.285390 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.285406 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.285429 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.285441 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.388437 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.388501 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.388511 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.388527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.388540 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.492295 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.492332 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.492341 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.492357 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.492367 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.595631 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.595705 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.595726 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.595757 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.595775 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.698242 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.698305 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.698319 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.698337 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.698350 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.751522 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/3.log" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.752395 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/2.log" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.754981 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" exitCode=1 Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.755040 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.755098 4869 scope.go:117] "RemoveContainer" containerID="9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.756106 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:42:39 crc kubenswrapper[4869]: E0929 13:42:39.756321 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.774137 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.788588 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801254 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801508 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801527 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801536 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801551 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.801577 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.820140 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://786525b43a52d54d8d7d8536f4d3ec8343664aa1
2f552312891a58c40bc44fdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:38Z\\\",\\\"message\\\":\\\"e (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.394840 6932 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394894 6932 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394904 6932 factory.go:656] Stopping watch factory\\\\nI0929 13:42:38.394936 6932 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394990 6932 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395147 6932 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395295 6932 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395567 6932 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"
192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.835877 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.853340 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.869334 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.886711 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.901815 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.903936 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.903985 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.904009 4869 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.904045 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.904077 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:39Z","lastTransitionTime":"2025-09-29T13:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.912896 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.930534 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.948683 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.964309 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.977876 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:39 crc kubenswrapper[4869]: I0929 13:42:39.990191 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:39Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.002029 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.006332 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.006380 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.006391 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.006412 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.006424 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.012478 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.022677 4869 
status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:40Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.109238 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.109333 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.109352 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.109383 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.109405 4869 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.212230 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.212278 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.212291 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.212310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.212323 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.241063 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.241195 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:40 crc kubenswrapper[4869]: E0929 13:42:40.241329 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.241395 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:40 crc kubenswrapper[4869]: E0929 13:42:40.241574 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:40 crc kubenswrapper[4869]: E0929 13:42:40.241750 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.314894 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.314932 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.314942 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.314958 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.314968 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.419234 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.419281 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.419292 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.419309 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.419321 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.521602 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.521711 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.521734 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.521765 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.521789 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.627507 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.627551 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.627567 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.627605 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.627631 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.731531 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.731578 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.731588 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.731605 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.731642 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.760596 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/3.log" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.834517 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.834571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.834582 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.834605 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.834647 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.937941 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.937982 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.937994 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.938012 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:40 crc kubenswrapper[4869]: I0929 13:42:40.938024 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:40Z","lastTransitionTime":"2025-09-29T13:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.040849 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.040918 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.040939 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.040972 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.041008 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.144164 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.144228 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.144243 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.144265 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.144285 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.241274 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.241526 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.247641 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.247686 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.247702 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.247719 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.247732 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.290040 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.290092 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.290105 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.290124 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.290135 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.305083 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:41Z is after 
2025-08-24T17:21:41Z" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.309331 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.309379 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.309395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.309416 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.309428 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.321997 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:41Z is after 
2025-08-24T17:21:41Z" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.325478 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.325533 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.325550 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.325573 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.325590 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.338087 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:41Z is after 
2025-08-24T17:21:41Z" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.341870 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.341920 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.341936 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.341954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.341965 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.353586 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:41Z is after 
2025-08-24T17:21:41Z" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.357883 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.357931 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.357945 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.357966 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.357979 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.370677 4869 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2ba3b3c6-8b84-48e7-886c-e0d5c4e18b37\\\",\\\"systemUUID\\\":\\\"5122158e-3b60-4d27-a340-00c79e99c195\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:41Z is after 
2025-08-24T17:21:41Z" Sep 29 13:42:41 crc kubenswrapper[4869]: E0929 13:42:41.370811 4869 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.372820 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.372878 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.372892 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.372914 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.372955 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.475661 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.475716 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.475729 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.475749 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.475768 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.577912 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.577945 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.577954 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.577970 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:41 crc kubenswrapper[4869]: I0929 13:42:41.577980 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:41Z","lastTransitionTime":"2025-09-29T13:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[identical five-entry node-status cycles (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready") repeated at ~100 ms intervals from 13:42:41.680908 through 13:42:42.197747, omitted]
Sep 29 13:42:42 crc kubenswrapper[4869]: I0929 13:42:42.241407 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:42 crc kubenswrapper[4869]: I0929 13:42:42.241516 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:42 crc kubenswrapper[4869]: E0929 13:42:42.241578 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:42 crc kubenswrapper[4869]: I0929 13:42:42.241625 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:42 crc kubenswrapper[4869]: E0929 13:42:42.241704 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:42 crc kubenswrapper[4869]: E0929 13:42:42.241851 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[identical node-status cycles repeated at ~100 ms intervals from 13:42:42.300894 through 13:42:42.919690, omitted]
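Two failure modes dominate this stretch of the log. First, the kubelet holds the node NotReady because no CNI configuration file exists yet under /etc/kubernetes/cni/net.d/, so every pod sandbox creation is skipped. Second, in the status_manager entries further below, every pod-status patch is rejected because the network-node-identity webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2025-09-29. What follows is a minimal standalone sketch of both checks, written against the Go standard library rather than the kubelet's actual internals; the certificate path reuses the /etc/webhook-cert/ mount visible in the network-node-identity pod status below, with tls.crt as an assumed file name.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// networkReady reports whether confDir contains at least one CNI config
// file (.conf, .conflist, or .json) -- the condition behind the repeated
// "no CNI configuration file in /etc/kubernetes/cni/net.d/" message.
func networkReady(confDir string) bool {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true
		}
	}
	return false
}

// certValid reports whether the first PEM certificate in path is valid at
// time now, i.e. the NotBefore/NotAfter window test whose failure surfaces
// as "x509: certificate has expired or is not yet valid".
func certValid(path string, now time.Time) (bool, error) {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return false, fmt.Errorf("no PEM data in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return !now.Before(cert.NotBefore) && !now.After(cert.NotAfter), nil
}

func main() {
	fmt.Println("NetworkReady:", networkReady("/etc/kubernetes/cni/net.d"))
	// tls.crt is a hypothetical file name; only the mount path appears in this log.
	ok, err := certValid("/etc/webhook-cert/tls.crt", time.Now())
	fmt.Println("webhook cert valid:", ok, "err:", err)
}

Run on the node, the first check would flip to true as soon as the network provider writes a config into the directory, and the second reproduces the validity-window test behind the webhook TLS errors; neither is the kubelet's real code path, just the shape of the two conditions this log keeps reporting.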
[identical node-status cycles repeated at ~100 ms intervals from 13:42:43.022494 through 13:42:43.230171, omitted]
Sep 29 13:42:43 crc kubenswrapper[4869]: I0929 13:42:43.241664 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:43 crc kubenswrapper[4869]: E0929 13:42:43.241855 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[identical node-status cycles repeated at ~100 ms intervals from 13:42:43.333824 through 13:42:44.159441, omitted]
Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.241067 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:44 crc kubenswrapper[4869]: E0929 13:42:44.241224 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.241424 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:44 crc kubenswrapper[4869]: E0929 13:42:44.241568 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.241917 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:44 crc kubenswrapper[4869]: E0929 13:42:44.242011 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262469 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262512 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262522 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262539 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262554 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.262570 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2621055c-1300-4465-83ae-1409f3d40539\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b727b0f50a6c044b5934a76a6f4092ac112941d92071de9110d4c13fc5cc9d3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfa8a31589a27757c508981a285cc0198ca395546a19a947eceb0944b00a75c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a
8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa324f783678ed17df7b31aa0fa1845552a45412df062f71f2bac0ed64cb6fa5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89e5a3bf81b5e5e8f799468d5ea2cbb94015cee2864e8b8bfd8e34b396edf397\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.285057 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbc0e07066bdaa74862bda6d5e61da5f6bdac636636575eb688c813a8f990f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.302887 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c917ce544d79391ede8bfa5bff50bee573f45d21720dd4ffa05ead04f61bff94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.320053 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.338831 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5078558d39b7429c6c9bcfca7d9c9b8be9c66b4d58d2b9ce3071aec5390cf2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8206692890be5e5eb5cfa724f50af855f7aa8c7289eaa4c4621324a7bc77ec21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.351996 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qb6gv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mxqkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.364682 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.364801 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.364814 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.364832 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.364844 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.368972 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfaec85b-c6d8-4dfe-9af8-36d6202a628a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64a532ebc363e7fc644bdfd38451710b7c9a0074cc93bcfe2992afec1314f00c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f41b973fc652328b47d1b5be4e14da698f7ce282e4e967c6d20618f49eec88e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0037c81f2fa529d85d98585df6648ecf1ac85056fab692933543592e432cc331\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7465dddc3773ffee9bfc8e5eef2bf3c47e0300df47cac0d54fb304d9d7019566\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46bf974e86013f3e7722a16ce2136a3e01b09e6bef41b82adffd50d76ebc3c5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-09-29T13:41:28Z\\\",\\\"message\\\":\\\"W0929 13:41:17.480934 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0929 13:41:17.481253 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759153277 cert, and key in /tmp/serving-cert-3216183954/serving-signer.crt, /tmp/serving-cert-3216183954/serving-signer.key\\\\nI0929 13:41:17.768286 1 observer_polling.go:159] Starting file observer\\\\nW0929 13:41:17.770744 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0929 13:41:17.770942 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0929 13:41:17.773082 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3216183954/tls.crt::/tmp/serving-cert-3216183954/tls.key\\\\\\\"\\\\nF0929 13:41:28.183915 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68d601fe6d9105a24f6ba2ae1fbe5c568b722fbf68869fbb7e1b08970cee1689\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63779a2539a590e63d76ef08da05c6ff81d9c251b8e61b40fd5460e3a4dc9f5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.383913 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b81c3431-bd0b-4590-98cd-3044fd78ef62\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4144aa159366ce95f263f17646c22d2aec8461bc85b25c7bc176bcd3b0af7514\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cd3f3652ddfd3079885a34b6acf8853e49d46fc7c2f2274b79cff0d707d8689\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15597f2a90bc1c35803b4d0d67b60879838fd39ae25c0eaf02c6e4f4edfab9a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6558e97cfa98b6bfcbf5fe503c7c8610c85618c02ce8383586da9b7c8e02f283\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.398936 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db54ec77-cb9b-46ac-81e4-9f86feacd079\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6526787ccb6c5318be1e8b1d93919f5d1ec919edf66fae2865dc6c59c21b2b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://06c25adab1f8892fd242951d9e3562ab36f906d3bda884f0575dbe82709f8fff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:14Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.413026 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8cjvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6795986-350e-4a46-8196-eb5cb3ba018e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb7b55ad0f3339629fe238133377a30b606d0550852a0b8bb1580fb0f779083c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sqv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:40Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8cjvt\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.425436 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.435317 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vpmmf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68cd537c-0a3f-4835-83bc-a45ae449712b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7da70eb9f4f438013e77ee8837e5a6b2273ed4f7b01bd1029a1735ac89744a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wkpst\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vpmmf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.452839 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:34Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.468015 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.468075 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.468089 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.468112 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.468125 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.493297 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vs8mc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e924d34-8790-41e8-a11a-91a1d0c625ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:25Z\\\",\\\"message\\\":\\\"2025-09-29T13:41:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f\\\\n2025-09-29T13:41:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7a5400f8-965d-44e2-8a33-484e297ebe1f to /host/opt/cni/bin/\\\\n2025-09-29T13:41:40Z [verbose] multus-daemon started\\\\n2025-09-29T13:41:40Z [verbose] Readiness Indicator file check\\\\n2025-09-29T13:42:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x49dh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vs8mc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.539938 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gsck4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db4e9fcf-5399-4029-9409-22b45496d7c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0874c4f7e8a5b6c2f98282e1bb8a81db80b1f42e3e6fb7c878dc5dcee3518e8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45b57bbc078af76065485bcb8b40c922c0842200a3ac05986b544126d95bf567\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4788e737a5d5fe5f820b1fb8c00fd18f0260d0bc2e5d49c5d21aae594c104608\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4378b8046ada09418985192063ae1d47fc59dbf33986a992dfe6df83850e30d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb30e40cac3b74fbd5bec26ffefe96854685ca8f792f32ba4b51064d69ac8d18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5096aff58cbd16582d9d99387403c14f4838a29a30d34cc359123b62fc10c043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3ae705d5d58f1cbc4d9b30c4f99404c646f8a154109ac384416876ab6e6cd44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd462\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gsck4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.552466 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2cb4b77-d447-4866-ac1e-eb4f0b4babae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e635e6e060570a429f377f6f06dc39cba5a575b3cf1df964ede9e6a2b897a892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z62t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-mrhp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.570483 4869 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.570558 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.570577 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.570649 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.570664 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.574756 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d03c451-25ce-46f9-9a14-f2ee29a89521\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://786525b43a52d54d8d7d8536f4d3ec8343664aa1
2f552312891a58c40bc44fdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9de6d7bc1e26b0d87b4e94c662ef830580ea98c5ecb184798a1392585e115fb5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:09Z\\\",\\\"message\\\":\\\"de event handler 2\\\\nI0929 13:42:09.121366 6531 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121447 6531 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0929 13:42:09.121472 6531 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0929 13:42:09.121585 6531 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0929 13:42:09.121673 6531 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0929 13:42:09.121675 6531 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0929 13:42:09.121753 6531 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0929 13:42:09.121809 6531 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0929 13:42:09.121843 6531 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0929 13:42:09.121851 6531 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0929 13:42:09.121938 6531 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0929 13:42:09.121988 6531 factory.go:656] Stopping watch factory\\\\nI0929 13:42:09.122017 6531 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0929 13:42:09.122086 6531 ovnkube.go:599] Stopped ovnkube\\\\nI0929 13:42:09.122180 6531 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0929 13:42:09.122384 6531 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-09-29T13:42:38Z\\\",\\\"message\\\":\\\"e (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.394840 6932 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394894 6932 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394904 6932 factory.go:656] Stopping watch factory\\\\nI0929 13:42:38.394936 6932 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0929 13:42:38.394990 6932 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395147 6932 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395295 6932 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0929 13:42:38.395567 6932 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-09-29T13:42:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"
192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-09-29T13:41:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-09-29T13:41:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-btwpm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mx9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.587582 4869 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9bccd33-a790-4ed3-a942-b08394a00913\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-09-29T13:41:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://634b55851f6e2940053d0eba32e32eed4e2f444cd0baac71b71541653b9a0b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d5f560dfd8afc03cbac19a6c80cf7ade9540e3b9b453fdfe8258bd8de89a671\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-09-29T13:41:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fh6wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-09-29T13:41:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-q4mtr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-09-29T13:42:44Z is after 2025-08-24T17:21:41Z" Sep 29 
13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.673473 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.673559 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.673591 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.673673 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.673702 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.777126 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.777168 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.777180 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.777197 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.777211 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.880205 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.880306 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.880361 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.880391 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.880408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.983836 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.983909 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.983927 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.983950 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:44 crc kubenswrapper[4869]: I0929 13:42:44.983973 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:44Z","lastTransitionTime":"2025-09-29T13:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.087118 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.087256 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.087272 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.087299 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.087313 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.190500 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.190540 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.190555 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.190571 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.190582 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.240939 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:45 crc kubenswrapper[4869]: E0929 13:42:45.241085 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.293915 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.293990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.294009 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.294037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.294055 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.397414 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.397465 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.397475 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.397494 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.397505 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.500755 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.500818 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.500834 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.500854 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.500867 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.604676 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.604736 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.604759 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.604786 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.604800 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.708096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.708173 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.708201 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.708233 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.708256 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.811348 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.811409 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.811431 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.811458 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.811473 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.914198 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.914310 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.914342 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.914373 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:45 crc kubenswrapper[4869]: I0929 13:42:45.914396 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:45Z","lastTransitionTime":"2025-09-29T13:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.017878 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.017970 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.018005 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.018035 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.018058 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.121847 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.121896 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.121908 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.121926 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.121940 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.225834 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.225884 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.225893 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.225910 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.225922 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.241298 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.241374 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.241313 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:46 crc kubenswrapper[4869]: E0929 13:42:46.241506 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:46 crc kubenswrapper[4869]: E0929 13:42:46.241642 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:46 crc kubenswrapper[4869]: E0929 13:42:46.241732 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.329588 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.330129 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.330393 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.330554 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.331014 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.434694 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.434746 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.434758 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.434777 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.434792 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.538344 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.538398 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.538416 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.538438 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.538452 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.641212 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.641306 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.641333 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.641364 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.641382 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.744403 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.744826 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.745003 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.745194 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.745393 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.849016 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.849082 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.849096 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.849123 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.849140 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.952564 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.952643 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.952662 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.952681 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:46 crc kubenswrapper[4869]: I0929 13:42:46.952692 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:46Z","lastTransitionTime":"2025-09-29T13:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.055969 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.056048 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.056062 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.056081 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.056094 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.159947 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.160019 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.160037 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.160062 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.160081 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.241374 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:47 crc kubenswrapper[4869]: E0929 13:42:47.241591 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.262728 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.263163 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.263491 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.263766 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.263994 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.367497 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.367560 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.367572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.367596 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.367621 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.470854 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.471342 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.471558 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.471765 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.471919 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.575387 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.575707 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.575745 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.575767 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.575785 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.678835 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.678914 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.678939 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.678978 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.679002 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.783219 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.783785 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.784071 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.784277 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.784494 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.887472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.887893 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.887990 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.888079 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.888140 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.992297 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.992353 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.992366 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.992395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:47 crc kubenswrapper[4869]: I0929 13:42:47.992408 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:47Z","lastTransitionTime":"2025-09-29T13:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.096683 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.096740 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.096752 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.096770 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.096788 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:48Z","lastTransitionTime":"2025-09-29T13:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.199689 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.199745 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.199759 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.199780 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.199792 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:48Z","lastTransitionTime":"2025-09-29T13:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.241344 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.241426 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:48 crc kubenswrapper[4869]: E0929 13:42:48.242198 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:48 crc kubenswrapper[4869]: E0929 13:42:48.241945 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.241445 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:48 crc kubenswrapper[4869]: E0929 13:42:48.242502 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.303368 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.303418 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.303433 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.303457 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.303474 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:48Z","lastTransitionTime":"2025-09-29T13:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.407312 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.407363 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.407374 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.407393 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Sep 29 13:42:48 crc kubenswrapper[4869]: I0929 13:42:48.407406 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:48Z","lastTransitionTime":"2025-09-29T13:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.240856 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:49 crc kubenswrapper[4869]: E0929 13:42:49.240973 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.241520 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.241549 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.241559 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.241572 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.241584 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.382113 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.382158 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.382173 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.382193 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.382206 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.484843 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.484886 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.484898 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.485274 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.485304 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.587906 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.587948 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.587958 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.587973 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.587985 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.691246 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.691298 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.691316 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.691337 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.691350 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.795007 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.795081 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.795093 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.795110 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.795122 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.897979 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.898025 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.898041 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.898058 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:49 crc kubenswrapper[4869]: I0929 13:42:49.898069 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:49Z","lastTransitionTime":"2025-09-29T13:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.001334 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.001381 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.001395 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.001417 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.001434 4869 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-09-29T13:42:50Z","lastTransitionTime":"2025-09-29T13:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.241886 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.241971 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:42:50 crc kubenswrapper[4869]: I0929 13:42:50.242034 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:42:50 crc kubenswrapper[4869]: E0929 13:42:50.242115 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Sep 29 13:42:50 crc kubenswrapper[4869]: E0929 13:42:50.242263 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Sep 29 13:42:50 crc kubenswrapper[4869]: E0929 13:42:50.242385 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7"
Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.240957 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:42:51 crc kubenswrapper[4869]: E0929 13:42:51.241185 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.814664 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"]
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.817255 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.818338 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.818656 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.819180 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.858419 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-vpmmf" podStartSLOduration=74.858393228 podStartE2EDuration="1m14.858393228s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.844034901 +0000 UTC m=+98.284679221" watchObservedRunningTime="2025-09-29 13:42:51.858393228 +0000 UTC m=+98.299037548" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.878506 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-vs8mc" podStartSLOduration=73.878484313 podStartE2EDuration="1m13.878484313s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.873633799 +0000 UTC m=+98.314278119" watchObservedRunningTime="2025-09-29 13:42:51.878484313 +0000 UTC m=+98.319128633" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.908955 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e406aa95-a903-4efa-888f-beeca4b69339-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.909013 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.909071 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e406aa95-a903-4efa-888f-beeca4b69339-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.909107 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/e406aa95-a903-4efa-888f-beeca4b69339-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.909134 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.920082 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gsck4" podStartSLOduration=73.920051758 podStartE2EDuration="1m13.920051758s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.903578076 +0000 UTC m=+98.344222406" watchObservedRunningTime="2025-09-29 13:42:51.920051758 +0000 UTC m=+98.360696078" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.946758 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podStartSLOduration=73.946730901 podStartE2EDuration="1m13.946730901s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.920217502 +0000 UTC m=+98.360861822" watchObservedRunningTime="2025-09-29 13:42:51.946730901 +0000 UTC m=+98.387375221" Sep 29 13:42:51 crc kubenswrapper[4869]: I0929 13:42:51.963166 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-q4mtr" podStartSLOduration=73.963141991 podStartE2EDuration="1m13.963141991s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.96310321 +0000 UTC m=+98.403747530" watchObservedRunningTime="2025-09-29 13:42:51.963141991 +0000 UTC m=+98.403786311" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.001350 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=78.001323909 podStartE2EDuration="1m18.001323909s" podCreationTimestamp="2025-09-29 13:41:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:51.984262482 +0000 UTC m=+98.424906802" watchObservedRunningTime="2025-09-29 13:42:52.001323909 +0000 UTC m=+98.441968239" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010476 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e406aa95-a903-4efa-888f-beeca4b69339-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:52 crc kubenswrapper[4869]: 
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010522 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010574 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e406aa95-a903-4efa-888f-beeca4b69339-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010638 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e406aa95-a903-4efa-888f-beeca4b69339-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010664 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.010660 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.011059 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e406aa95-a903-4efa-888f-beeca4b69339-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.011661 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e406aa95-a903-4efa-888f-beeca4b69339-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.022324 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e406aa95-a903-4efa-888f-beeca4b69339-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh"
\"kubernetes.io/projected/e406aa95-a903-4efa-888f-beeca4b69339-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bvlrh\" (UID: \"e406aa95-a903-4efa-888f-beeca4b69339\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.128681 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=79.12865737 podStartE2EDuration="1m19.12865737s" podCreationTimestamp="2025-09-29 13:41:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:52.110921256 +0000 UTC m=+98.551565596" watchObservedRunningTime="2025-09-29 13:42:52.12865737 +0000 UTC m=+98.569301690" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.142754 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=47.142730491 podStartE2EDuration="47.142730491s" podCreationTimestamp="2025-09-29 13:42:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:52.129767489 +0000 UTC m=+98.570411809" watchObservedRunningTime="2025-09-29 13:42:52.142730491 +0000 UTC m=+98.583374811" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.144220 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.160259 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-8cjvt" podStartSLOduration=75.160239629 podStartE2EDuration="1m15.160239629s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:52.159252894 +0000 UTC m=+98.599897214" watchObservedRunningTime="2025-09-29 13:42:52.160239629 +0000 UTC m=+98.600883949" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.160740 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=29.160734952 podStartE2EDuration="29.160734952s" podCreationTimestamp="2025-09-29 13:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:52.143921561 +0000 UTC m=+98.584565881" watchObservedRunningTime="2025-09-29 13:42:52.160734952 +0000 UTC m=+98.601379272" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.241808 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.241912 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.241912 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:52 crc kubenswrapper[4869]: E0929 13:42:52.242084 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:52 crc kubenswrapper[4869]: E0929 13:42:52.242302 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:52 crc kubenswrapper[4869]: E0929 13:42:52.242349 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.811461 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" event={"ID":"e406aa95-a903-4efa-888f-beeca4b69339","Type":"ContainerStarted","Data":"ed7dcb2fbd9f45e7556ba45d23b2861335d128cb9c1745578332d64eb1a52618"} Sep 29 13:42:52 crc kubenswrapper[4869]: I0929 13:42:52.811587 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" event={"ID":"e406aa95-a903-4efa-888f-beeca4b69339","Type":"ContainerStarted","Data":"99c5a82c43fd9832c7501432fb2deae5145c78eb95f9a4e0fec4d540d8a91cd5"} Sep 29 13:42:53 crc kubenswrapper[4869]: I0929 13:42:53.241900 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:53 crc kubenswrapper[4869]: E0929 13:42:53.242442 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:54 crc kubenswrapper[4869]: I0929 13:42:54.242358 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:54 crc kubenswrapper[4869]: I0929 13:42:54.242808 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:54 crc kubenswrapper[4869]: E0929 13:42:54.242973 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:54 crc kubenswrapper[4869]: I0929 13:42:54.243004 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:54 crc kubenswrapper[4869]: E0929 13:42:54.243222 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:54 crc kubenswrapper[4869]: E0929 13:42:54.243336 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:54 crc kubenswrapper[4869]: I0929 13:42:54.244070 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:42:54 crc kubenswrapper[4869]: E0929 13:42:54.244348 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:42:54 crc kubenswrapper[4869]: I0929 13:42:54.296186 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bvlrh" podStartSLOduration=77.296160843 podStartE2EDuration="1m17.296160843s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:42:52.830355221 +0000 UTC m=+99.270999541" watchObservedRunningTime="2025-09-29 13:42:54.296160843 +0000 UTC m=+100.736805163" Sep 29 13:42:55 crc kubenswrapper[4869]: I0929 13:42:55.241706 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:55 crc kubenswrapper[4869]: E0929 13:42:55.241920 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:56 crc kubenswrapper[4869]: I0929 13:42:56.241738 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:56 crc kubenswrapper[4869]: I0929 13:42:56.241832 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:56 crc kubenswrapper[4869]: I0929 13:42:56.241798 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:56 crc kubenswrapper[4869]: E0929 13:42:56.242063 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:56 crc kubenswrapper[4869]: E0929 13:42:56.242121 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:56 crc kubenswrapper[4869]: E0929 13:42:56.242213 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:56 crc kubenswrapper[4869]: I0929 13:42:56.361185 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:56 crc kubenswrapper[4869]: E0929 13:42:56.361447 4869 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:56 crc kubenswrapper[4869]: E0929 13:42:56.361581 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs podName:9d791a01-f367-41f9-bd94-a7cee0b4b7c7 nodeName:}" failed. No retries permitted until 2025-09-29 13:44:00.361556171 +0000 UTC m=+166.802200491 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs") pod "network-metrics-daemon-mxqkf" (UID: "9d791a01-f367-41f9-bd94-a7cee0b4b7c7") : object "openshift-multus"/"metrics-daemon-secret" not registered Sep 29 13:42:57 crc kubenswrapper[4869]: I0929 13:42:57.240802 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:57 crc kubenswrapper[4869]: E0929 13:42:57.240954 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:58 crc kubenswrapper[4869]: I0929 13:42:58.241582 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:42:58 crc kubenswrapper[4869]: E0929 13:42:58.242709 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:42:58 crc kubenswrapper[4869]: I0929 13:42:58.242135 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:42:58 crc kubenswrapper[4869]: E0929 13:42:58.243020 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:42:58 crc kubenswrapper[4869]: I0929 13:42:58.241779 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:42:58 crc kubenswrapper[4869]: E0929 13:42:58.243274 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:42:59 crc kubenswrapper[4869]: I0929 13:42:59.241277 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:42:59 crc kubenswrapper[4869]: E0929 13:42:59.241863 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:42:59 crc kubenswrapper[4869]: I0929 13:42:59.263160 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Sep 29 13:43:00 crc kubenswrapper[4869]: I0929 13:43:00.241155 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:00 crc kubenswrapper[4869]: I0929 13:43:00.241216 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:00 crc kubenswrapper[4869]: I0929 13:43:00.241167 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:00 crc kubenswrapper[4869]: E0929 13:43:00.241312 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:00 crc kubenswrapper[4869]: E0929 13:43:00.241383 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:00 crc kubenswrapper[4869]: E0929 13:43:00.241516 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:01 crc kubenswrapper[4869]: I0929 13:43:01.241392 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:01 crc kubenswrapper[4869]: E0929 13:43:01.242106 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:02 crc kubenswrapper[4869]: I0929 13:43:02.241153 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:02 crc kubenswrapper[4869]: I0929 13:43:02.241233 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:02 crc kubenswrapper[4869]: I0929 13:43:02.241325 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:02 crc kubenswrapper[4869]: E0929 13:43:02.241342 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:02 crc kubenswrapper[4869]: E0929 13:43:02.241543 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:02 crc kubenswrapper[4869]: E0929 13:43:02.241647 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:03 crc kubenswrapper[4869]: I0929 13:43:03.240895 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:03 crc kubenswrapper[4869]: E0929 13:43:03.241054 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:04 crc kubenswrapper[4869]: I0929 13:43:04.241178 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:04 crc kubenswrapper[4869]: I0929 13:43:04.241317 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:04 crc kubenswrapper[4869]: E0929 13:43:04.242521 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:04 crc kubenswrapper[4869]: I0929 13:43:04.242537 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:04 crc kubenswrapper[4869]: E0929 13:43:04.242715 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:04 crc kubenswrapper[4869]: E0929 13:43:04.242832 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:04 crc kubenswrapper[4869]: I0929 13:43:04.283319 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=5.28329141 podStartE2EDuration="5.28329141s" podCreationTimestamp="2025-09-29 13:42:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:04.282376527 +0000 UTC m=+110.723020847" watchObservedRunningTime="2025-09-29 13:43:04.28329141 +0000 UTC m=+110.723935740" Sep 29 13:43:05 crc kubenswrapper[4869]: I0929 13:43:05.241548 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:05 crc kubenswrapper[4869]: E0929 13:43:05.241760 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:06 crc kubenswrapper[4869]: I0929 13:43:06.241063 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:06 crc kubenswrapper[4869]: I0929 13:43:06.241116 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:06 crc kubenswrapper[4869]: I0929 13:43:06.241259 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:06 crc kubenswrapper[4869]: E0929 13:43:06.241303 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:06 crc kubenswrapper[4869]: E0929 13:43:06.241492 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:06 crc kubenswrapper[4869]: E0929 13:43:06.241623 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:07 crc kubenswrapper[4869]: I0929 13:43:07.241546 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:07 crc kubenswrapper[4869]: E0929 13:43:07.241879 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:07 crc kubenswrapper[4869]: I0929 13:43:07.243552 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:43:07 crc kubenswrapper[4869]: E0929 13:43:07.243900 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mx9tj_openshift-ovn-kubernetes(5d03c451-25ce-46f9-9a14-f2ee29a89521)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" Sep 29 13:43:08 crc kubenswrapper[4869]: I0929 13:43:08.241462 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:08 crc kubenswrapper[4869]: I0929 13:43:08.241509 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:08 crc kubenswrapper[4869]: E0929 13:43:08.241653 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:08 crc kubenswrapper[4869]: E0929 13:43:08.241800 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:08 crc kubenswrapper[4869]: I0929 13:43:08.242842 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:08 crc kubenswrapper[4869]: E0929 13:43:08.243107 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:09 crc kubenswrapper[4869]: I0929 13:43:09.241596 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:09 crc kubenswrapper[4869]: E0929 13:43:09.241903 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:10 crc kubenswrapper[4869]: I0929 13:43:10.240880 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:10 crc kubenswrapper[4869]: I0929 13:43:10.240922 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:10 crc kubenswrapper[4869]: I0929 13:43:10.241090 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:10 crc kubenswrapper[4869]: E0929 13:43:10.241630 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:10 crc kubenswrapper[4869]: E0929 13:43:10.241761 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:10 crc kubenswrapper[4869]: E0929 13:43:10.241883 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:11 crc kubenswrapper[4869]: I0929 13:43:11.241520 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:11 crc kubenswrapper[4869]: E0929 13:43:11.241692 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.240713 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.240765 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.240812 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:12 crc kubenswrapper[4869]: E0929 13:43:12.240858 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:12 crc kubenswrapper[4869]: E0929 13:43:12.240970 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:12 crc kubenswrapper[4869]: E0929 13:43:12.241058 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.885020 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/1.log" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.885785 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/0.log" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.885864 4869 generic.go:334] "Generic (PLEG): container finished" podID="0e924d34-8790-41e8-a11a-91a1d0c625ca" containerID="c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436" exitCode=1 Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.885919 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerDied","Data":"c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436"} Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.886023 4869 scope.go:117] "RemoveContainer" containerID="599d6c8de246ca2f9da6f0f53d2ec81466d4e7907022e2d682187d60b4b6f2c6" Sep 29 13:43:12 crc kubenswrapper[4869]: I0929 13:43:12.886881 4869 scope.go:117] "RemoveContainer" containerID="c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436" Sep 29 13:43:12 crc kubenswrapper[4869]: E0929 13:43:12.887330 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-vs8mc_openshift-multus(0e924d34-8790-41e8-a11a-91a1d0c625ca)\"" pod="openshift-multus/multus-vs8mc" podUID="0e924d34-8790-41e8-a11a-91a1d0c625ca" Sep 29 13:43:13 crc kubenswrapper[4869]: I0929 13:43:13.241262 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:13 crc kubenswrapper[4869]: E0929 13:43:13.241440 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:13 crc kubenswrapper[4869]: I0929 13:43:13.890768 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/1.log" Sep 29 13:43:14 crc kubenswrapper[4869]: E0929 13:43:14.199919 4869 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Sep 29 13:43:14 crc kubenswrapper[4869]: I0929 13:43:14.241275 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:14 crc kubenswrapper[4869]: I0929 13:43:14.241337 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:14 crc kubenswrapper[4869]: E0929 13:43:14.242753 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:14 crc kubenswrapper[4869]: I0929 13:43:14.242783 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:14 crc kubenswrapper[4869]: E0929 13:43:14.242914 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:14 crc kubenswrapper[4869]: E0929 13:43:14.243242 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:14 crc kubenswrapper[4869]: E0929 13:43:14.386248 4869 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 29 13:43:15 crc kubenswrapper[4869]: I0929 13:43:15.241018 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:15 crc kubenswrapper[4869]: E0929 13:43:15.241764 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:16 crc kubenswrapper[4869]: I0929 13:43:16.241801 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:16 crc kubenswrapper[4869]: I0929 13:43:16.241811 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:16 crc kubenswrapper[4869]: I0929 13:43:16.241801 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:16 crc kubenswrapper[4869]: E0929 13:43:16.241972 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:16 crc kubenswrapper[4869]: E0929 13:43:16.242203 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:16 crc kubenswrapper[4869]: E0929 13:43:16.242089 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:17 crc kubenswrapper[4869]: I0929 13:43:17.240784 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:17 crc kubenswrapper[4869]: E0929 13:43:17.240967 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:18 crc kubenswrapper[4869]: I0929 13:43:18.240863 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:18 crc kubenswrapper[4869]: I0929 13:43:18.240912 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:18 crc kubenswrapper[4869]: E0929 13:43:18.241051 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:18 crc kubenswrapper[4869]: I0929 13:43:18.241095 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:18 crc kubenswrapper[4869]: E0929 13:43:18.241249 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:18 crc kubenswrapper[4869]: E0929 13:43:18.241503 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:19 crc kubenswrapper[4869]: I0929 13:43:19.241154 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:19 crc kubenswrapper[4869]: E0929 13:43:19.241699 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:19 crc kubenswrapper[4869]: E0929 13:43:19.387791 4869 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 29 13:43:20 crc kubenswrapper[4869]: I0929 13:43:20.243829 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:20 crc kubenswrapper[4869]: I0929 13:43:20.243878 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:20 crc kubenswrapper[4869]: I0929 13:43:20.243844 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:20 crc kubenswrapper[4869]: E0929 13:43:20.244042 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:20 crc kubenswrapper[4869]: E0929 13:43:20.244331 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:20 crc kubenswrapper[4869]: E0929 13:43:20.244237 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.241485 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:21 crc kubenswrapper[4869]: E0929 13:43:21.241733 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.242569 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.921635 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/3.log" Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.924630 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerStarted","Data":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.925051 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:43:21 crc kubenswrapper[4869]: I0929 13:43:21.957970 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podStartSLOduration=103.957946402 podStartE2EDuration="1m43.957946402s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:21.957914761 +0000 UTC m=+128.398559101" watchObservedRunningTime="2025-09-29 13:43:21.957946402 +0000 UTC m=+128.398590722" Sep 29 13:43:22 crc kubenswrapper[4869]: I0929 13:43:22.169181 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mxqkf"] Sep 29 13:43:22 crc kubenswrapper[4869]: I0929 13:43:22.169345 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:22 crc kubenswrapper[4869]: E0929 13:43:22.169471 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:22 crc kubenswrapper[4869]: I0929 13:43:22.241183 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:22 crc kubenswrapper[4869]: I0929 13:43:22.241236 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:22 crc kubenswrapper[4869]: E0929 13:43:22.241433 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:22 crc kubenswrapper[4869]: E0929 13:43:22.241647 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:23 crc kubenswrapper[4869]: I0929 13:43:23.241241 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:23 crc kubenswrapper[4869]: I0929 13:43:23.241241 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:23 crc kubenswrapper[4869]: E0929 13:43:23.241481 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:23 crc kubenswrapper[4869]: E0929 13:43:23.241522 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:24 crc kubenswrapper[4869]: I0929 13:43:24.242736 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:24 crc kubenswrapper[4869]: E0929 13:43:24.242858 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:24 crc kubenswrapper[4869]: I0929 13:43:24.243130 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:24 crc kubenswrapper[4869]: E0929 13:43:24.243321 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:24 crc kubenswrapper[4869]: E0929 13:43:24.389309 4869 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Sep 29 13:43:25 crc kubenswrapper[4869]: I0929 13:43:25.241870 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:25 crc kubenswrapper[4869]: I0929 13:43:25.241895 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:25 crc kubenswrapper[4869]: E0929 13:43:25.242141 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:25 crc kubenswrapper[4869]: E0929 13:43:25.242296 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:26 crc kubenswrapper[4869]: I0929 13:43:26.241054 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:26 crc kubenswrapper[4869]: E0929 13:43:26.241258 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:26 crc kubenswrapper[4869]: I0929 13:43:26.241800 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:26 crc kubenswrapper[4869]: E0929 13:43:26.241979 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:27 crc kubenswrapper[4869]: I0929 13:43:27.241707 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:27 crc kubenswrapper[4869]: I0929 13:43:27.241707 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:27 crc kubenswrapper[4869]: E0929 13:43:27.241850 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:27 crc kubenswrapper[4869]: E0929 13:43:27.241943 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:27 crc kubenswrapper[4869]: I0929 13:43:27.242201 4869 scope.go:117] "RemoveContainer" containerID="c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436" Sep 29 13:43:27 crc kubenswrapper[4869]: I0929 13:43:27.946111 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/1.log" Sep 29 13:43:27 crc kubenswrapper[4869]: I0929 13:43:27.946460 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerStarted","Data":"efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c"} Sep 29 13:43:28 crc kubenswrapper[4869]: I0929 13:43:28.241158 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:28 crc kubenswrapper[4869]: I0929 13:43:28.241284 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:28 crc kubenswrapper[4869]: E0929 13:43:28.241345 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Sep 29 13:43:28 crc kubenswrapper[4869]: E0929 13:43:28.241545 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Sep 29 13:43:29 crc kubenswrapper[4869]: I0929 13:43:29.241738 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:29 crc kubenswrapper[4869]: I0929 13:43:29.241770 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:29 crc kubenswrapper[4869]: E0929 13:43:29.241890 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mxqkf" podUID="9d791a01-f367-41f9-bd94-a7cee0b4b7c7" Sep 29 13:43:29 crc kubenswrapper[4869]: E0929 13:43:29.242038 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.241289 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.241371 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.244357 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.246101 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.248072 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Sep 29 13:43:30 crc kubenswrapper[4869]: I0929 13:43:30.249049 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Sep 29 13:43:31 crc kubenswrapper[4869]: I0929 13:43:31.240862 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf" Sep 29 13:43:31 crc kubenswrapper[4869]: I0929 13:43:31.241148 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Sep 29 13:43:31 crc kubenswrapper[4869]: I0929 13:43:31.244433 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Sep 29 13:43:31 crc kubenswrapper[4869]: I0929 13:43:31.246324 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.572472 4869 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.619962 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.620749 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.620857 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.621508 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.623044 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.623459 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.627718 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qqgzh"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.628283 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s2kxs"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.628777 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.629130 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.629707 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.629982 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.632215 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.632700 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.632865 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.633236 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.633691 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.633917 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.634100 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.634264 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.634427 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.634601 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.634812 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.635369 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.635553 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.635735 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.635961 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.636122 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.636730 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.637005 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.637185 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Sep 29 
13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.637391 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.637874 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.640969 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.641932 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.652090 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.653470 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.654183 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.661135 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.684382 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-b8gtg"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.684990 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.685309 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.685670 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.685987 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-b8gtg" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.686157 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.687370 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.687509 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.687684 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.687930 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.687964 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688014 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688057 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688072 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688085 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit-dir\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688109 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkv89\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-kube-api-access-xkv89\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688136 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5bca4dbe-67e9-4619-b821-5b187f159ed9-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688159 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688181 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-auth-proxy-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688202 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb3f0a27-5821-43ba-bc08-482dceab1b74-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688223 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4bxj\" (UniqueName: \"kubernetes.io/projected/2aa3898a-a2ad-4e76-8927-337e20298d02-kube-api-access-b4bxj\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688248 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-encryption-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688263 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688286 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7ac0775-6bc1-470a-b7cd-e769a213122e-serving-cert\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688313 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc 
kubenswrapper[4869]: I0929 13:43:32.688338 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688360 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsqbw\" (UniqueName: \"kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688382 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btf6k\" (UniqueName: \"kubernetes.io/projected/cabc3961-9121-416f-962c-80558b14a820-kube-api-access-btf6k\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688403 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688421 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688441 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26hkc\" (UniqueName: \"kubernetes.io/projected/d2000633-937f-4146-8721-cc475e3e930b-kube-api-access-26hkc\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688465 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688487 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-service-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 
13:43:32.688511 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-image-import-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688535 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2000633-937f-4146-8721-cc475e3e930b-machine-approver-tls\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688561 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688582 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-serving-cert\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688604 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96ldk\" (UniqueName: \"kubernetes.io/projected/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-kube-api-access-96ldk\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688648 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688673 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-node-pullsecrets\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688695 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbv4m\" (UniqueName: \"kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688731 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688752 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2aa3898a-a2ad-4e76-8927-337e20298d02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688775 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-config\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688796 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688820 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8k2h\" (UniqueName: \"kubernetes.io/projected/f7ac0775-6bc1-470a-b7cd-e769a213122e-kube-api-access-q8k2h\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688845 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688865 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb3f0a27-5821-43ba-bc08-482dceab1b74-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688904 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cabc3961-9121-416f-962c-80558b14a820-serving-cert\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688926 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: 
\"kubernetes.io/empty-dir/cabc3961-9121-416f-962c-80558b14a820-available-featuregates\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688946 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-serving-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688968 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bca4dbe-67e9-4619-b821-5b187f159ed9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688997 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689019 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-client\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689042 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25jrc\" (UniqueName: \"kubernetes.io/projected/eb3f0a27-5821-43ba-bc08-482dceab1b74-kube-api-access-25jrc\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689064 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689139 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t7kkw"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689771 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690323 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690463 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688389 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.688926 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689017 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689249 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689286 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689320 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689420 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689451 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689672 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689725 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.689854 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690142 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690168 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690193 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690244 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690296 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690298 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.690343 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 
13:43:32.690494 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.691545 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.692411 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.698937 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.699242 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.699598 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.699855 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.699984 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.700390 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.702546 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.705406 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.706311 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.706762 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.707132 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.707429 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.707812 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k2q4s"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.708200 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.708720 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.708776 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-pq5fc"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.709337 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.711090 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.711863 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.718287 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.727965 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.730646 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.731398 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.732627 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.732780 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.733427 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.734141 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.734465 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.734668 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.735345 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.735458 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.736240 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.736382 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.735463 4869 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.740957 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.741075 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.741090 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.743849 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.746572 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759069 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759241 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759309 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759354 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759844 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.759979 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.761332 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.761654 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.761894 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.763190 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.763365 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.763470 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.763689 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764163 4869 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764284 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764405 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764526 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764648 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.767969 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-zk45b"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.768582 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764661 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.768896 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.764754 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.769082 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.765660 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.769491 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.769587 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.772542 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.772709 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.776036 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.777568 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.778684 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-2ghwn"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.779122 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.779678 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.780205 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.780668 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.780945 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.781245 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.782059 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.782280 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.784028 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.784786 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.785888 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.788148 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.789310 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.789725 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.790465 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-config\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.790568 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-config\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.790679 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.791640 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.790694 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.791955 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8k2h\" (UniqueName: \"kubernetes.io/projected/f7ac0775-6bc1-470a-b7cd-e769a213122e-kube-api-access-q8k2h\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.791996 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m68lw\" (UniqueName: \"kubernetes.io/projected/d4c75357-03b7-433e-a45a-422bb2f54337-kube-api-access-m68lw\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792035 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-trusted-ca\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792062 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-client\") pod \"etcd-operator-b45778765-pq5fc\" (UID: 
\"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792088 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792117 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs2ht\" (UniqueName: \"kubernetes.io/projected/29edf770-4392-4e07-9aee-efa379244b2d-kube-api-access-zs2ht\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792148 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792172 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb3f0a27-5821-43ba-bc08-482dceab1b74-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792195 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-tmpfs\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792219 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-apiservice-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792280 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cabc3961-9121-416f-962c-80558b14a820-serving-cert\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792305 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: 
\"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792333 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d76b2fd-25de-456f-9217-f4585285248c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792357 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm6fm\" (UniqueName: \"kubernetes.io/projected/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-kube-api-access-wm6fm\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792384 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cabc3961-9121-416f-962c-80558b14a820-available-featuregates\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792413 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-serving-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792448 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bca4dbe-67e9-4619-b821-5b187f159ed9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792494 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792522 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792547 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 
13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792572 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-client\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792576 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-config\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792606 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-images\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792658 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzg7n\" (UniqueName: \"kubernetes.io/projected/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-kube-api-access-vzg7n\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792690 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792722 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25jrc\" (UniqueName: \"kubernetes.io/projected/eb3f0a27-5821-43ba-bc08-482dceab1b74-kube-api-access-25jrc\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792749 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792777 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt4mr\" (UniqueName: \"kubernetes.io/projected/2d76b2fd-25de-456f-9217-f4585285248c-kube-api-access-pt4mr\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792803 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7db5686b-2de9-4e9b-99cc-80924f2c110a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792836 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792859 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-config\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792882 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792915 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8krnx\" (UniqueName: \"kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792954 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.792979 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793004 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb0bf99c-6af7-48b3-b415-016a751f526e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" 
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793027 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-webhook-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793055 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit-dir\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkv89\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-kube-api-access-xkv89\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793112 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-stats-auth\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793141 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5bca4dbe-67e9-4619-b821-5b187f159ed9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793168 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793194 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793219 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-service-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793244 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms9nm\" (UniqueName: \"kubernetes.io/projected/7cdb33ce-d579-40e7-934b-94666c8e7c27-kube-api-access-ms9nm\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793291 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-auth-proxy-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793319 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq2lv\" (UniqueName: \"kubernetes.io/projected/eb0bf99c-6af7-48b3-b415-016a751f526e-kube-api-access-zq2lv\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793342 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-metrics-tls\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793371 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d3f667-2064-442a-a0a7-899f35a00d9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793401 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793425 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793454 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb3f0a27-5821-43ba-bc08-482dceab1b74-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 
13:43:32.793480 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4bxj\" (UniqueName: \"kubernetes.io/projected/2aa3898a-a2ad-4e76-8927-337e20298d02-kube-api-access-b4bxj\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793510 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793539 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-encryption-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793565 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-serving-cert\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793806 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7ac0775-6bc1-470a-b7cd-e769a213122e-serving-cert\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793840 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793873 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-serving-cert\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793900 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-metrics-tls\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793940 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ftz8\" (UniqueName: 
\"kubernetes.io/projected/e47976d3-2e57-4943-a744-75ef0accd1ec-kube-api-access-5ftz8\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.793978 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsqbw\" (UniqueName: \"kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794004 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794037 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794069 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btf6k\" (UniqueName: \"kubernetes.io/projected/cabc3961-9121-416f-962c-80558b14a820-kube-api-access-btf6k\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794098 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26hkc\" (UniqueName: \"kubernetes.io/projected/d2000633-937f-4146-8721-cc475e3e930b-kube-api-access-26hkc\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794125 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-encryption-config\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794156 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794184 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: 
\"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794211 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjqxw\" (UniqueName: \"kubernetes.io/projected/78378d8c-602a-4af2-ad85-c1e4330e959e-kube-api-access-hjqxw\") pod \"migrator-59844c95c7-h5swj\" (UID: \"78378d8c-602a-4af2-ad85-c1e4330e959e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794240 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29edf770-4392-4e07-9aee-efa379244b2d-config\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794267 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptmh5\" (UniqueName: \"kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794301 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad6570a1-7f62-46b4-87e8-6ddea76c4101-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794354 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d96sh\" (UniqueName: \"kubernetes.io/projected/b5d3f667-2064-442a-a0a7-899f35a00d9f-kube-api-access-d96sh\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794381 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e47976d3-2e57-4943-a744-75ef0accd1ec-service-ca-bundle\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794415 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert\") pod 
\"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794443 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-service-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794469 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhmsv\" (UniqueName: \"kubernetes.io/projected/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-kube-api-access-qhmsv\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794499 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794524 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-metrics-certs\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794551 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-image-import-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794573 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794600 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794667 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhhk6\" (UniqueName: \"kubernetes.io/projected/7db5686b-2de9-4e9b-99cc-80924f2c110a-kube-api-access-fhhk6\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: 
\"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794694 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-policies\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794697 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794718 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmt8n\" (UniqueName: \"kubernetes.io/projected/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-kube-api-access-gmt8n\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794744 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-config-volume\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794777 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2000633-937f-4146-8721-cc475e3e930b-machine-approver-tls\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794791 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb3f0a27-5821-43ba-bc08-482dceab1b74-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794805 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-serving-cert\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.794832 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795428 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-serving-cert\") pod 
\"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795470 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96ldk\" (UniqueName: \"kubernetes.io/projected/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-kube-api-access-96ldk\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795506 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-config\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795538 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-node-pullsecrets\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795564 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795592 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d76b2fd-25de-456f-9217-f4585285248c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795662 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbv4m\" (UniqueName: \"kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795691 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795717 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29edf770-4392-4e07-9aee-efa379244b2d-serving-cert\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795741 
4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795766 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-client\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795797 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795826 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2aa3898a-a2ad-4e76-8927-337e20298d02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795854 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z68d\" (UniqueName: \"kubernetes.io/projected/ad6570a1-7f62-46b4-87e8-6ddea76c4101-kube-api-access-9z68d\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795891 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795916 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795943 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmcm6\" (UniqueName: \"kubernetes.io/projected/41996eeb-6d4e-45e1-b140-f2ff7f0bec29-kube-api-access-tmcm6\") pod \"downloads-7954f5f757-b8gtg\" (UID: \"41996eeb-6d4e-45e1-b140-f2ff7f0bec29\") " pod="openshift-console/downloads-7954f5f757-b8gtg" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795964 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-dir\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795989 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-default-certificate\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.796027 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7db5686b-2de9-4e9b-99cc-80924f2c110a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.796947 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cabc3961-9121-416f-962c-80558b14a820-available-featuregates\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.797198 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-node-pullsecrets\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.797706 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.795767 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.798548 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.798698 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.798792 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.799055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.800253 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.802666 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bca4dbe-67e9-4619-b821-5b187f159ed9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.802860 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-client\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.803304 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cabc3961-9121-416f-962c-80558b14a820-serving-cert\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.803582 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-serving-cert\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.803657 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.803830 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb3f0a27-5821-43ba-bc08-482dceab1b74-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804055 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804244 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804302 4869 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804905 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804922 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.804916 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.805330 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.805848 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-etcd-serving-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.806917 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.807023 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.807578 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2000633-937f-4146-8721-cc475e3e930b-auth-proxy-config\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.808279 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-encryption-config\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.808376 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/f7ac0775-6bc1-470a-b7cd-e769a213122e-service-ca-bundle\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.808437 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t6hkv"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.809459 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.809858 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.809931 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6"] Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.810404 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.811565 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.812516 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.813547 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.814409 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5bca4dbe-67e9-4619-b821-5b187f159ed9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.814747 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.814803 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.816442 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.818129 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.818142 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2aa3898a-a2ad-4e76-8927-337e20298d02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.818720 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-audit-dir\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.819889 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7ac0775-6bc1-470a-b7cd-e769a213122e-serving-cert\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 
13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.820723 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qqgzh"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.820998 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.823533 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.823962 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-image-import-ca\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.829387 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.829662 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2000633-937f-4146-8721-cc475e3e930b-machine-approver-tls\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.857259 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.857672 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.865202 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.865252 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t7kkw"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.866717 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.869011 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.870029 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.872345 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.875449 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.878283 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.880932 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-b8gtg"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.883138 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s2kxs"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.884001 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c76cl"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.884838 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c76cl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.885577 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.887928 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.889990 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.892150 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-pq5fc"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.893449 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.895392 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897004 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897191 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897218 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhhk6\" (UniqueName: \"kubernetes.io/projected/7db5686b-2de9-4e9b-99cc-80924f2c110a-kube-api-access-fhhk6\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897264 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-serving-cert\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897284 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-policies\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897304 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmt8n\" (UniqueName: \"kubernetes.io/projected/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-kube-api-access-gmt8n\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897323 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-config-volume\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-config\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897382 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897400 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d76b2fd-25de-456f-9217-f4585285248c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897425 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29edf770-4392-4e07-9aee-efa379244b2d-serving-cert\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897442 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897457 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-client\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897476 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmcm6\" (UniqueName: \"kubernetes.io/projected/41996eeb-6d4e-45e1-b140-f2ff7f0bec29-kube-api-access-tmcm6\") pod \"downloads-7954f5f757-b8gtg\" (UID: \"41996eeb-6d4e-45e1-b140-f2ff7f0bec29\") " pod="openshift-console/downloads-7954f5f757-b8gtg"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897494 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z68d\" (UniqueName: \"kubernetes.io/projected/ad6570a1-7f62-46b4-87e8-6ddea76c4101-kube-api-access-9z68d\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897510 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897530 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-dir\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897566 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-default-certificate\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897583 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7db5686b-2de9-4e9b-99cc-80924f2c110a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897597 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-config\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897636 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m68lw\" (UniqueName: \"kubernetes.io/projected/d4c75357-03b7-433e-a45a-422bb2f54337-kube-api-access-m68lw\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897653 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-trusted-ca\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897675 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-tmpfs\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897691 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-apiservice-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897704 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-client\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897721 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897742 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs2ht\" (UniqueName: \"kubernetes.io/projected/29edf770-4392-4e07-9aee-efa379244b2d-kube-api-access-zs2ht\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897768 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm6fm\" (UniqueName: \"kubernetes.io/projected/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-kube-api-access-wm6fm\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897791 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897805 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d76b2fd-25de-456f-9217-f4585285248c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897848 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897896 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-images\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897913 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzg7n\" (UniqueName: \"kubernetes.io/projected/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-kube-api-access-vzg7n\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897930 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897958 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897981 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt4mr\" (UniqueName: \"kubernetes.io/projected/2d76b2fd-25de-456f-9217-f4585285248c-kube-api-access-pt4mr\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.897998 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7db5686b-2de9-4e9b-99cc-80924f2c110a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898018 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-config\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898041 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8krnx\" (UniqueName: \"kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898184 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb0bf99c-6af7-48b3-b415-016a751f526e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898210 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-webhook-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898229 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-service-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898247 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms9nm\" (UniqueName: \"kubernetes.io/projected/7cdb33ce-d579-40e7-934b-94666c8e7c27-kube-api-access-ms9nm\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898267 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-stats-auth\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898287 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.898499 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899072 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq2lv\" (UniqueName: \"kubernetes.io/projected/eb0bf99c-6af7-48b3-b415-016a751f526e-kube-api-access-zq2lv\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899116 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-metrics-tls\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899148 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d3f667-2064-442a-a0a7-899f35a00d9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899177 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899199 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899222 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899239 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-serving-cert\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899277 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-metrics-tls\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899299 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-serving-cert\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899318 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899386 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-policies\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899398 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ftz8\" (UniqueName: \"kubernetes.io/projected/e47976d3-2e57-4943-a744-75ef0accd1ec-kube-api-access-5ftz8\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899456 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-encryption-config\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899481 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad6570a1-7f62-46b4-87e8-6ddea76c4101-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899498 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899516 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjqxw\" (UniqueName: \"kubernetes.io/projected/78378d8c-602a-4af2-ad85-c1e4330e959e-kube-api-access-hjqxw\") pod \"migrator-59844c95c7-h5swj\" (UID: \"78378d8c-602a-4af2-ad85-c1e4330e959e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899560 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29edf770-4392-4e07-9aee-efa379244b2d-config\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899579 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptmh5\" (UniqueName: \"kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899631 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d96sh\" (UniqueName: \"kubernetes.io/projected/b5d3f667-2064-442a-a0a7-899f35a00d9f-kube-api-access-d96sh\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899650 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e47976d3-2e57-4943-a744-75ef0accd1ec-service-ca-bundle\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899672 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899691 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-metrics-certs\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.899710 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhmsv\" (UniqueName: \"kubernetes.io/projected/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-kube-api-access-qhmsv\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.900519 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-config\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901036 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901056 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901134 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k2q4s"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901427 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-tmpfs\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901559 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.901638 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7cdb33ce-d579-40e7-934b-94666c8e7c27-audit-dir\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.902013 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.902582 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.902746 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-config\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.903049 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.903391 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.903799 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d76b2fd-25de-456f-9217-f4585285248c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.904030 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-trusted-ca\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.904862 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ad6570a1-7f62-46b4-87e8-6ddea76c4101-images\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.905267 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.905744 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.906200 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d76b2fd-25de-456f-9217-f4585285248c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.906407 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.906454 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad6570a1-7f62-46b4-87e8-6ddea76c4101-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.906580 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.906960 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.907526 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.908035 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.908427 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-etcd-client\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.908553 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-serving-cert\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.909399 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-zk45b"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.909403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.911496 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.912294 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-serving-cert\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.912859 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.913374 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.913897 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.914130 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-encryption-config\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.914277 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-metrics-tls\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.914745 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.914788 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.915644 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.916742 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.918937 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.919929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cdb33ce-d579-40e7-934b-94666c8e7c27-serving-cert\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.920054 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.921159 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.922555 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.923348 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.924234 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.924834 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-config\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.924978 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.926242 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.927642 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c76cl"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.928803 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.930133 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.931571 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.933028 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rxvg9"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.933857 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rxvg9"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.934196 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.935616 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.938321 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t6hkv"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.940888 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.945730 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.947179 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.949322 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-2gjfj"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.957182 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-k7lp4"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.957811 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.959123 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-k7lp4"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.959243 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-k7lp4"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.960247 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-2gjfj"]
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.963344 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.975743 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-client\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.983825 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Sep 29 13:43:32 crc kubenswrapper[4869]: I0929 13:43:32.995132 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-service-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.003988 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.004576 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d4c75357-03b7-433e-a45a-422bb2f54337-etcd-ca\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.023898 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.043864 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.083151 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.106197 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.123759 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.143776 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.163899 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.170351 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-config-volume\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.184210 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.195948 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-metrics-tls\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.204191 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.223905 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.244536 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.255973 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-apiservice-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.257853 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-webhook-cert\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.264353 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.284597 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.305327 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.325216 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.338577 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7db5686b-2de9-4e9b-99cc-80924f2c110a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.344087 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.353680 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7db5686b-2de9-4e9b-99cc-80924f2c110a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.364029 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.385364 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.397892 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d3f667-2064-442a-a0a7-899f35a00d9f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.403520 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.424884 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.444804 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.459681 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb0bf99c-6af7-48b3-b415-016a751f526e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.465973 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.485026 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.503691 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.523780 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.544939 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.566211 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.577107 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-default-certificate\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.584481 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.599704 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-stats-auth\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.605260 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.620852 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e47976d3-2e57-4943-a744-75ef0accd1ec-metrics-certs\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.624813 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.636071 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e47976d3-2e57-4943-a744-75ef0accd1ec-service-ca-bundle\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.644234 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.664747 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.683949 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.704370 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.718167 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29edf770-4392-4e07-9aee-efa379244b2d-serving-cert\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.724231 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.725845 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29edf770-4392-4e07-9aee-efa379244b2d-config\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.742902 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.782937 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.801493 4869 request.go:700] Waited for 1.011424365s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.804359 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.824596 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.845837 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.864543 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.885472 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.923938 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8k2h\" (UniqueName: \"kubernetes.io/projected/f7ac0775-6bc1-470a-b7cd-e769a213122e-kube-api-access-q8k2h\") pod \"authentication-operator-69f744f599-qqgzh\" (UID: \"f7ac0775-6bc1-470a-b7cd-e769a213122e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.950245 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96ldk\" (UniqueName: \"kubernetes.io/projected/cdc548b0-0411-4ae7-a8bf-df1d9808bf0d-kube-api-access-96ldk\") pod \"apiserver-76f77b778f-s2kxs\" (UID: \"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d\") " pod="openshift-apiserver/apiserver-76f77b778f-s2kxs"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.964301 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbv4m\" (UniqueName: \"kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m\") pod \"controller-manager-879f6c89f-vhbp8\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8"
Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.983668 4869 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"machine-config-operator-images" Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.985179 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4bxj\" (UniqueName: \"kubernetes.io/projected/2aa3898a-a2ad-4e76-8927-337e20298d02-kube-api-access-b4bxj\") pod \"cluster-samples-operator-665b6dd947-lnc9f\" (UID: \"2aa3898a-a2ad-4e76-8927-337e20298d02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:33 crc kubenswrapper[4869]: I0929 13:43:33.990516 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.003146 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.009819 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.025416 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.059459 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25jrc\" (UniqueName: \"kubernetes.io/projected/eb3f0a27-5821-43ba-bc08-482dceab1b74-kube-api-access-25jrc\") pod \"openshift-controller-manager-operator-756b6f6bc6-tph9m\" (UID: \"eb3f0a27-5821-43ba-bc08-482dceab1b74\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.065408 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.084981 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.098213 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.105754 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.125326 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.172457 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsqbw\" (UniqueName: \"kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw\") pod \"console-f9d7485db-m2zpm\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") " pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.172822 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.183905 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.204539 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.206789 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m2zpm" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.234871 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qqgzh"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.238457 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.248710 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkv89\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-kube-api-access-xkv89\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:34 crc kubenswrapper[4869]: W0929 13:43:34.253888 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7ac0775_6bc1_470a_b7cd_e769a213122e.slice/crio-ee4563fe1d000c6ec52aea91ca50fe42f0281505189850b5e64c001c894b6706 WatchSource:0}: Error finding container ee4563fe1d000c6ec52aea91ca50fe42f0281505189850b5e64c001c894b6706: Status 404 returned error can't find the container with id ee4563fe1d000c6ec52aea91ca50fe42f0281505189850b5e64c001c894b6706 Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.255375 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s2kxs"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.259938 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bca4dbe-67e9-4619-b821-5b187f159ed9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zkprl\" (UID: \"5bca4dbe-67e9-4619-b821-5b187f159ed9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.263370 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.263712 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.286277 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.310647 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.324311 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.357115 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.357399 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.370017 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.390150 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.395824 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.411681 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.449249 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btf6k\" (UniqueName: \"kubernetes.io/projected/cabc3961-9121-416f-962c-80558b14a820-kube-api-access-btf6k\") pod \"openshift-config-operator-7777fb866f-7jlsc\" (UID: \"cabc3961-9121-416f-962c-80558b14a820\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.467090 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.468063 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26hkc\" (UniqueName: \"kubernetes.io/projected/d2000633-937f-4146-8721-cc475e3e930b-kube-api-access-26hkc\") pod \"machine-approver-56656f9798-55q6h\" (UID: \"d2000633-937f-4146-8721-cc475e3e930b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.477882 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.480946 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.484752 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: W0929 13:43:34.497933 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac14296d_ce5b_4b73_84f6_3f39e3280f26.slice/crio-c3b0d79821e77a98dadd11dbb1e5fc893bd7e363a19e5e1c515e88ebe2c145b3 WatchSource:0}: Error finding container c3b0d79821e77a98dadd11dbb1e5fc893bd7e363a19e5e1c515e88ebe2c145b3: Status 404 returned error can't find the container with id c3b0d79821e77a98dadd11dbb1e5fc893bd7e363a19e5e1c515e88ebe2c145b3 Sep 29 13:43:34 crc kubenswrapper[4869]: W0929 13:43:34.501837 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2000633_937f_4146_8721_cc475e3e930b.slice/crio-d7f402af68da07f698dcd03ee4aa063d615956f983b036fd43d416a69948a782 WatchSource:0}: Error finding container d7f402af68da07f698dcd03ee4aa063d615956f983b036fd43d416a69948a782: Status 404 returned error can't find the container with id d7f402af68da07f698dcd03ee4aa063d615956f983b036fd43d416a69948a782 Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.504591 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.524773 4869 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-marketplace"/"marketplace-operator-metrics" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.554089 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.558681 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.564986 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.586212 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.601856 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.605274 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.623915 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.636156 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.645215 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.664595 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.677915 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.702783 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m68lw\" (UniqueName: \"kubernetes.io/projected/d4c75357-03b7-433e-a45a-422bb2f54337-kube-api-access-m68lw\") pod \"etcd-operator-b45778765-pq5fc\" (UID: \"d4c75357-03b7-433e-a45a-422bb2f54337\") " pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.723794 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmt8n\" (UniqueName: \"kubernetes.io/projected/d8cbc415-fef2-48a6-b66e-2afe6cd3be27-kube-api-access-gmt8n\") pod \"dns-operator-744455d44c-k2q4s\" (UID: \"d8cbc415-fef2-48a6-b66e-2afe6cd3be27\") " pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.738157 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhhk6\" (UniqueName: \"kubernetes.io/projected/7db5686b-2de9-4e9b-99cc-80924f2c110a-kube-api-access-fhhk6\") pod \"kube-storage-version-migrator-operator-b67b599dd-5hgz7\" (UID: \"7db5686b-2de9-4e9b-99cc-80924f2c110a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.757849 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq2lv\" (UniqueName: \"kubernetes.io/projected/eb0bf99c-6af7-48b3-b415-016a751f526e-kube-api-access-zq2lv\") pod \"package-server-manager-789f6589d5-7qjv7\" (UID: \"eb0bf99c-6af7-48b3-b415-016a751f526e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.780419 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs2ht\" (UniqueName: \"kubernetes.io/projected/29edf770-4392-4e07-9aee-efa379244b2d-kube-api-access-zs2ht\") pod \"service-ca-operator-777779d784-wfwrw\" (UID: \"29edf770-4392-4e07-9aee-efa379244b2d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.794061 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.802442 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.802873 4869 request.go:700] Waited for 1.89993128s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/default/token Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.810994 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm6fm\" (UniqueName: \"kubernetes.io/projected/3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac-kube-api-access-wm6fm\") pod \"dns-default-zk45b\" (UID: \"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac\") " pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.821005 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.835587 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmcm6\" (UniqueName: \"kubernetes.io/projected/41996eeb-6d4e-45e1-b140-f2ff7f0bec29-kube-api-access-tmcm6\") pod \"downloads-7954f5f757-b8gtg\" (UID: \"41996eeb-6d4e-45e1-b140-f2ff7f0bec29\") " pod="openshift-console/downloads-7954f5f757-b8gtg" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.836990 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.838578 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z68d\" (UniqueName: \"kubernetes.io/projected/ad6570a1-7f62-46b4-87e8-6ddea76c4101-kube-api-access-9z68d\") pod \"machine-api-operator-5694c8668f-k2ml4\" (UID: \"ad6570a1-7f62-46b4-87e8-6ddea76c4101\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.859459 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8krnx\" (UniqueName: \"kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx\") pod \"oauth-openshift-558db77b4-7trbl\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.872016 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.879642 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.886765 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms9nm\" (UniqueName: \"kubernetes.io/projected/7cdb33ce-d579-40e7-934b-94666c8e7c27-kube-api-access-ms9nm\") pod \"apiserver-7bbb656c7d-kkrrb\" (UID: \"7cdb33ce-d579-40e7-934b-94666c8e7c27\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.908973 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc"] Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.915011 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d96sh\" (UniqueName: \"kubernetes.io/projected/b5d3f667-2064-442a-a0a7-899f35a00d9f-kube-api-access-d96sh\") pod \"control-plane-machine-set-operator-78cbb6b69f-bzpd6\" (UID: \"b5d3f667-2064-442a-a0a7-899f35a00d9f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.925846 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptmh5\" (UniqueName: \"kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5\") pod \"route-controller-manager-6576b87f9c-7467k\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:34 crc kubenswrapper[4869]: W0929 13:43:34.928198 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcabc3961_9121_416f_962c_80558b14a820.slice/crio-82e57453857d1061855a64f37947abe0757cec8c1ad33984915bcc1d414bb27e WatchSource:0}: Error finding container 82e57453857d1061855a64f37947abe0757cec8c1ad33984915bcc1d414bb27e: Status 404 returned error can't find the container with id 82e57453857d1061855a64f37947abe0757cec8c1ad33984915bcc1d414bb27e Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.938536 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjqxw\" (UniqueName: \"kubernetes.io/projected/78378d8c-602a-4af2-ad85-c1e4330e959e-kube-api-access-hjqxw\") pod \"migrator-59844c95c7-h5swj\" (UID: \"78378d8c-602a-4af2-ad85-c1e4330e959e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.965778 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhmsv\" (UniqueName: \"kubernetes.io/projected/a61b3f02-628b-4dbf-bebe-499b9ee8b96f-kube-api-access-qhmsv\") pod \"console-operator-58897d9998-t7kkw\" (UID: \"a61b3f02-628b-4dbf-bebe-499b9ee8b96f\") " pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.978126 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" event={"ID":"eb3f0a27-5821-43ba-bc08-482dceab1b74","Type":"ContainerStarted","Data":"83c4fa1166f612abe9d04b657b8c35b37bfdb83abfbceadd190d7ff4fae5cff2"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.978175 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" event={"ID":"eb3f0a27-5821-43ba-bc08-482dceab1b74","Type":"ContainerStarted","Data":"59d1ea4d9ecae64af79c2406b2e86f9d2ad6cbf53aae8582844061ae593bffba"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.982061 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ftz8\" (UniqueName: \"kubernetes.io/projected/e47976d3-2e57-4943-a744-75ef0accd1ec-kube-api-access-5ftz8\") pod \"router-default-5444994796-2ghwn\" (UID: \"e47976d3-2e57-4943-a744-75ef0accd1ec\") " pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.986643 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" event={"ID":"e6c3c3e6-62ba-4301-bd4a-f5cafd385463","Type":"ContainerStarted","Data":"45fb2495ec39c8ff8a11c7bec4263955f46efd4c6c3c3791d06bb77514f764fc"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.986694 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" event={"ID":"e6c3c3e6-62ba-4301-bd4a-f5cafd385463","Type":"ContainerStarted","Data":"d9c56dbc24de65b361db1b57c6f20b8a4def4e40fcc39ebfa8cb714a97e60c59"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.987445 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.990767 4869 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-vhbp8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.990826 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.994008 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" event={"ID":"2aa3898a-a2ad-4e76-8927-337e20298d02","Type":"ContainerStarted","Data":"ad880bad78b1609d8c5645456b842a0fca81d41a5391670c0ac8917d3bac0ee6"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.999319 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m2zpm" event={"ID":"ac14296d-ce5b-4b73-84f6-3f39e3280f26","Type":"ContainerStarted","Data":"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"} Sep 29 13:43:34 crc kubenswrapper[4869]: I0929 13:43:34.999353 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m2zpm" event={"ID":"ac14296d-ce5b-4b73-84f6-3f39e3280f26","Type":"ContainerStarted","Data":"c3b0d79821e77a98dadd11dbb1e5fc893bd7e363a19e5e1c515e88ebe2c145b3"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.004438 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" 
event={"ID":"f7ac0775-6bc1-470a-b7cd-e769a213122e","Type":"ContainerStarted","Data":"706bd72f0eb3febb3bd1330fe991cf90c25c9da645eb31857c320cd1f618d36c"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.004510 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" event={"ID":"f7ac0775-6bc1-470a-b7cd-e769a213122e","Type":"ContainerStarted","Data":"ee4563fe1d000c6ec52aea91ca50fe42f0281505189850b5e64c001c894b6706"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.008102 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" event={"ID":"5bca4dbe-67e9-4619-b821-5b187f159ed9","Type":"ContainerStarted","Data":"0284c84f832765f49c3a68601e32d08401ddd279773c35057f6d1deff0a4fb19"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.012311 4869 generic.go:334] "Generic (PLEG): container finished" podID="cdc548b0-0411-4ae7-a8bf-df1d9808bf0d" containerID="2c2c8c1db168a373f74d831fe8a6a13cb9717e74c40ea337e22dd60c08fa13b5" exitCode=0 Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.012379 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt4mr\" (UniqueName: \"kubernetes.io/projected/2d76b2fd-25de-456f-9217-f4585285248c-kube-api-access-pt4mr\") pod \"openshift-apiserver-operator-796bbdcf4f-bs57s\" (UID: \"2d76b2fd-25de-456f-9217-f4585285248c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.012397 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" event={"ID":"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d","Type":"ContainerDied","Data":"2c2c8c1db168a373f74d831fe8a6a13cb9717e74c40ea337e22dd60c08fa13b5"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.012431 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" event={"ID":"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d","Type":"ContainerStarted","Data":"284965eaf605e81d4285dcebf458480f8e4e8d704d13d5b0d076039fb0565e13"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.014935 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.015428 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" event={"ID":"cabc3961-9121-416f-962c-80558b14a820","Type":"ContainerStarted","Data":"82e57453857d1061855a64f37947abe0757cec8c1ad33984915bcc1d414bb27e"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.018442 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" event={"ID":"d2000633-937f-4146-8721-cc475e3e930b","Type":"ContainerStarted","Data":"d7f402af68da07f698dcd03ee4aa063d615956f983b036fd43d416a69948a782"} Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.026066 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-b8gtg" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.030361 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.033172 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.040656 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.043669 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.047012 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzg7n\" (UniqueName: \"kubernetes.io/projected/41fc0fbf-c0c9-431f-9f56-ef4296e6b25a-kube-api-access-vzg7n\") pod \"packageserver-d55dfcdfc-8lrvm\" (UID: \"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.053282 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.061375 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.065404 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.083358 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.083820 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.103856 4869 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.113199 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.124597 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.133584 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.146344 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.146577 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.168392 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.180091 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.187140 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.209366 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.225406 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k2q4s"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.271516 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.271946 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.271972 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272015 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272034 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b39162-f5d9-4bef-90e5-e3ed59225936-config\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272053 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25b39162-f5d9-4bef-90e5-e3ed59225936-kube-api-access\") pod 
\"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272071 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.272120 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:35.772097017 +0000 UTC m=+142.212741537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272176 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25b39162-f5d9-4bef-90e5-e3ed59225936-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272283 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdwsm\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272411 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.272440 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.375051 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378432 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmtn2\" (UniqueName: \"kubernetes.io/projected/88d471bd-e253-4961-b6a3-4da4d15f7bc9-kube-api-access-hmtn2\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378507 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgvjg\" (UniqueName: \"kubernetes.io/projected/82aa7000-191d-49ff-8a4c-e6f49b4946c3-kube-api-access-dgvjg\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378560 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378581 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378645 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrwrg\" (UniqueName: \"kubernetes.io/projected/bf20c45d-df7c-4122-a218-630d563e72c8-kube-api-access-hrwrg\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378756 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f4028e9-fd1c-44d6-9811-43ef6e558655-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378779 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-profile-collector-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378855 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-mountpoint-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: 
\"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378899 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-registration-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378950 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-certs\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378973 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/63169d28-deb7-44a8-afd2-48f4fbb6fa69-metrics-tls\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.378994 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hstb7\" (UniqueName: \"kubernetes.io/projected/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-kube-api-access-hstb7\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379012 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379029 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzsss\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-kube-api-access-rzsss\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379114 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-srv-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379141 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f4028e9-fd1c-44d6-9811-43ef6e558655-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379384 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379478 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-node-bootstrap-token\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379512 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f4028e9-fd1c-44d6-9811-43ef6e558655-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379561 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379583 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf20c45d-df7c-4122-a218-630d563e72c8-proxy-tls\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379603 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379676 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-proxy-tls\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379765 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9npd\" (UniqueName: \"kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379845 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379874 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-images\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379922 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379946 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379962 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-srv-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.379980 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsd2q\" (UniqueName: \"kubernetes.io/projected/80fc78d5-95e1-4be9-bb44-52692b4409db-kube-api-access-hsd2q\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380024 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63169d28-deb7-44a8-afd2-48f4fbb6fa69-trusted-ca\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380083 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b39162-f5d9-4bef-90e5-e3ed59225936-config\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380103 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25b39162-f5d9-4bef-90e5-e3ed59225936-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380129 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8hm6\" (UniqueName: \"kubernetes.io/projected/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-kube-api-access-f8hm6\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380211 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380249 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25b39162-f5d9-4bef-90e5-e3ed59225936-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380419 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-plugins-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380447 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-key\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380515 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-socket-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380535 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380557 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80fc78d5-95e1-4be9-bb44-52692b4409db-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380596 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-cabundle\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380658 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380709 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380736 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2mzx\" (UniqueName: \"kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380754 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcp4q\" (UniqueName: \"kubernetes.io/projected/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-kube-api-access-xcp4q\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380785 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7phw5\" (UniqueName: \"kubernetes.io/projected/f1728b99-11ce-48cd-990b-d68af8a3f006-kube-api-access-7phw5\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380802 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380840 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380858 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-config\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380898 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdwsm\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380917 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf20c45d-df7c-4122-a218-630d563e72c8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.380982 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-csi-data-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.381000 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w2rs\" (UniqueName: \"kubernetes.io/projected/1995f69c-86b1-47cc-b76e-69bc593e6520-kube-api-access-5w2rs\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.381092 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1995f69c-86b1-47cc-b76e-69bc593e6520-cert\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.381229 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:35.881210456 +0000 UTC m=+142.321854776 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.386998 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b39162-f5d9-4bef-90e5-e3ed59225936-config\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.387534 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.392162 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.392739 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.401370 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.402905 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25b39162-f5d9-4bef-90e5-e3ed59225936-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.407982 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.435348 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.471498 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25b39162-f5d9-4bef-90e5-e3ed59225936-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-jc6l9\" (UID: \"25b39162-f5d9-4bef-90e5-e3ed59225936\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482745 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-profile-collector-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482786 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-mountpoint-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482809 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482827 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-registration-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482844 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-certs\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482868 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/63169d28-deb7-44a8-afd2-48f4fbb6fa69-metrics-tls\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482886 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzsss\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-kube-api-access-rzsss\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482905 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hstb7\" (UniqueName: \"kubernetes.io/projected/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-kube-api-access-hstb7\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482921 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482949 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f4028e9-fd1c-44d6-9811-43ef6e558655-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482963 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-srv-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.482988 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f4028e9-fd1c-44d6-9811-43ef6e558655-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483004 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-node-bootstrap-token\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483019 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf20c45d-df7c-4122-a218-630d563e72c8-proxy-tls\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483035 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc 
kubenswrapper[4869]: I0929 13:43:35.483053 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-proxy-tls\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483078 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9npd\" (UniqueName: \"kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483095 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483112 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-images\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483145 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-srv-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483162 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483180 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsd2q\" (UniqueName: \"kubernetes.io/projected/80fc78d5-95e1-4be9-bb44-52692b4409db-kube-api-access-hsd2q\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483198 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63169d28-deb7-44a8-afd2-48f4fbb6fa69-trusted-ca\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483217 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8hm6\" 
(UniqueName: \"kubernetes.io/projected/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-kube-api-access-f8hm6\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483246 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-plugins-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483271 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-key\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483292 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-socket-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483309 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483330 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-cabundle\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483366 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80fc78d5-95e1-4be9-bb44-52692b4409db-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483386 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483401 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2mzx\" (UniqueName: \"kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483415 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483431 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcp4q\" (UniqueName: \"kubernetes.io/projected/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-kube-api-access-xcp4q\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483451 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7phw5\" (UniqueName: \"kubernetes.io/projected/f1728b99-11ce-48cd-990b-d68af8a3f006-kube-api-access-7phw5\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483466 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483485 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483500 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-config\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483520 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf20c45d-df7c-4122-a218-630d563e72c8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483543 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-csi-data-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483559 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5w2rs\" (UniqueName: \"kubernetes.io/projected/1995f69c-86b1-47cc-b76e-69bc593e6520-kube-api-access-5w2rs\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483578 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1995f69c-86b1-47cc-b76e-69bc593e6520-cert\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483595 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmtn2\" (UniqueName: \"kubernetes.io/projected/88d471bd-e253-4961-b6a3-4da4d15f7bc9-kube-api-access-hmtn2\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483627 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgvjg\" (UniqueName: \"kubernetes.io/projected/82aa7000-191d-49ff-8a4c-e6f49b4946c3-kube-api-access-dgvjg\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483646 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrwrg\" (UniqueName: \"kubernetes.io/projected/bf20c45d-df7c-4122-a218-630d563e72c8-kube-api-access-hrwrg\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483672 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f4028e9-fd1c-44d6-9811-43ef6e558655-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.483735 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdwsm\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.484739 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f4028e9-fd1c-44d6-9811-43ef6e558655-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.486089 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.486575 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-plugins-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.489220 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-cabundle\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.490028 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f4028e9-fd1c-44d6-9811-43ef6e558655-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.490167 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.490642 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-config\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.491790 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-socket-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.491925 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63169d28-deb7-44a8-afd2-48f4fbb6fa69-trusted-ca\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.493908 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-mountpoint-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.495691 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-images\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.497787 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-srv-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.499206 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.506477 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-node-bootstrap-token\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.510054 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-certs\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.513974 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/63169d28-deb7-44a8-afd2-48f4fbb6fa69-metrics-tls\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.514728 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.014706763 +0000 UTC m=+142.455351083 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.515315 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-registration-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.517795 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1728b99-11ce-48cd-990b-d68af8a3f006-csi-data-dir\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.518532 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf20c45d-df7c-4122-a218-630d563e72c8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.519474 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.527797 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf20c45d-df7c-4122-a218-630d563e72c8-proxy-tls\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.527968 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80fc78d5-95e1-4be9-bb44-52692b4409db-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.529921 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.532498 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 
13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.533127 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/82aa7000-191d-49ff-8a4c-e6f49b4946c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.543007 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.544495 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-proxy-tls\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.546685 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-zk45b"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.551320 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/88d471bd-e253-4961-b6a3-4da4d15f7bc9-signing-key\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.559967 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1995f69c-86b1-47cc-b76e-69bc593e6520-cert\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.561536 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-profile-collector-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.566993 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsd2q\" (UniqueName: \"kubernetes.io/projected/80fc78d5-95e1-4be9-bb44-52692b4409db-kube-api-access-hsd2q\") pod \"multus-admission-controller-857f4d67dd-t6hkv\" (UID: \"80fc78d5-95e1-4be9-bb44-52692b4409db\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.567752 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-srv-cert\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.571412 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.571467 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-pq5fc"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.577292 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8hm6\" (UniqueName: \"kubernetes.io/projected/74ca8d3d-03dd-47e5-8d58-ded2cb2f9886-kube-api-access-f8hm6\") pod \"catalog-operator-68c6474976-fb4q6\" (UID: \"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.578746 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.580230 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcp4q\" (UniqueName: \"kubernetes.io/projected/3d9c83b7-6dd1-4be7-ba30-7d030eae6524-kube-api-access-xcp4q\") pod \"machine-config-server-rxvg9\" (UID: \"3d9c83b7-6dd1-4be7-ba30-7d030eae6524\") " pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.582648 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7phw5\" (UniqueName: \"kubernetes.io/projected/f1728b99-11ce-48cd-990b-d68af8a3f006-kube-api-access-7phw5\") pod \"csi-hostpathplugin-2gjfj\" (UID: \"f1728b99-11ce-48cd-990b-d68af8a3f006\") " pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.587308 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.587941 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.087910852 +0000 UTC m=+142.528555162 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.605077 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.608850 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.645272 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f4028e9-fd1c-44d6-9811-43ef6e558655-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zszgw\" (UID: \"5f4028e9-fd1c-44d6-9811-43ef6e558655\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.667173 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2mzx\" (UniqueName: \"kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx\") pod \"marketplace-operator-79b997595-bwxhh\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.688849 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.689666 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.189650804 +0000 UTC m=+142.630295124 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.707936 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9npd\" (UniqueName: \"kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd\") pod \"collect-profiles-29319210-6mqqj\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.735882 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hstb7\" (UniqueName: \"kubernetes.io/projected/2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef-kube-api-access-hstb7\") pod \"machine-config-operator-74547568cd-xnzv4\" (UID: \"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.741304 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzsss\" (UniqueName: \"kubernetes.io/projected/63169d28-deb7-44a8-afd2-48f4fbb6fa69-kube-api-access-rzsss\") pod \"ingress-operator-5b745b69d9-qvmsj\" (UID: \"63169d28-deb7-44a8-afd2-48f4fbb6fa69\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: 
I0929 13:43:35.754987 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecf75dd0-8b7e-4a13-8658-cb848ee6aeed-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s6jkm\" (UID: \"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.756001 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.780385 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmtn2\" (UniqueName: \"kubernetes.io/projected/88d471bd-e253-4961-b6a3-4da4d15f7bc9-kube-api-access-hmtn2\") pod \"service-ca-9c57cc56f-c76cl\" (UID: \"88d471bd-e253-4961-b6a3-4da4d15f7bc9\") " pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.791704 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.792238 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.292219786 +0000 UTC m=+142.732864116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.793176 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.809351 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.817113 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.825313 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-k2ml4"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.825373 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.825969 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.826093 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgvjg\" (UniqueName: \"kubernetes.io/projected/82aa7000-191d-49ff-8a4c-e6f49b4946c3-kube-api-access-dgvjg\") pod \"olm-operator-6b444d44fb-6w87k\" (UID: \"82aa7000-191d-49ff-8a4c-e6f49b4946c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.828156 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t7kkw"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.829859 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrwrg\" (UniqueName: \"kubernetes.io/projected/bf20c45d-df7c-4122-a218-630d563e72c8-kube-api-access-hrwrg\") pod \"machine-config-controller-84d6567774-p52tc\" (UID: \"bf20c45d-df7c-4122-a218-630d563e72c8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.832348 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.838551 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.847904 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.851848 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5w2rs\" (UniqueName: \"kubernetes.io/projected/1995f69c-86b1-47cc-b76e-69bc593e6520-kube-api-access-5w2rs\") pod \"ingress-canary-k7lp4\" (UID: \"1995f69c-86b1-47cc-b76e-69bc593e6520\") " pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.859805 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.862449 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.862991 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rxvg9" Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.882676 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.893869 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.894324 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.3943048 +0000 UTC m=+142.834949120 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.903471 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-b8gtg"] Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.914559 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-k7lp4" Sep 29 13:43:35 crc kubenswrapper[4869]: W0929 13:43:35.966540 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda61b3f02_628b_4dbf_bebe_499b9ee8b96f.slice/crio-7072403c89420d27d8ff9867e24111350cdd8714bbca954a13f0cf5a56ae172e WatchSource:0}: Error finding container 7072403c89420d27d8ff9867e24111350cdd8714bbca954a13f0cf5a56ae172e: Status 404 returned error can't find the container with id 7072403c89420d27d8ff9867e24111350cdd8714bbca954a13f0cf5a56ae172e Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.996930 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.997337 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.497300876 +0000 UTC m=+142.937945196 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:35 crc kubenswrapper[4869]: I0929 13:43:35.997669 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:35 crc kubenswrapper[4869]: E0929 13:43:35.998582 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.498548169 +0000 UTC m=+142.939192499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.033704 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" event={"ID":"d2000633-937f-4146-8721-cc475e3e930b","Type":"ContainerStarted","Data":"6a6d720b3283be645b27d7cb1f0a0eb9d782634894cf5d314d925ecffa740c89"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.033747 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" event={"ID":"d2000633-937f-4146-8721-cc475e3e930b","Type":"ContainerStarted","Data":"1ac7eac7b41a4d6b5997f5acc264923a5424eeaf84620b559142910338bed159"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.043464 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" event={"ID":"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d","Type":"ContainerStarted","Data":"78c47ac31fef4c1ec5cc0fc8b1da2e9bc980c893a674f9792262708e0e3a22e2"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.045748 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" event={"ID":"eb0bf99c-6af7-48b3-b415-016a751f526e","Type":"ContainerStarted","Data":"14a2c8aa4666a38bdb647c40a9f9be67ff3b48e4ae374b5ffe711818c8f31336"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.051757 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zk45b" event={"ID":"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac","Type":"ContainerStarted","Data":"9cc440e8d8e244ddfb50b9e918c72fff269e22fc6e6ba475993eb2b91e00d046"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.052918 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" event={"ID":"7db5686b-2de9-4e9b-99cc-80924f2c110a","Type":"ContainerStarted","Data":"efd6087180e61ea4c0f9ca5b2ffe0a8b98ad444793fa7a0514fef38d164646c1"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.056512 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.058535 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" event={"ID":"d8cbc415-fef2-48a6-b66e-2afe6cd3be27","Type":"ContainerStarted","Data":"5eddca0ed094de52d5b8b7998de43b2c3c481a3bba7e637138018f23843ffd6b"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.058573 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" event={"ID":"d8cbc415-fef2-48a6-b66e-2afe6cd3be27","Type":"ContainerStarted","Data":"3a3edd51f0499f8b074971306fcb78c6a57e69dec1e1f3bfe79b2c5727577702"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.059991 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" event={"ID":"29edf770-4392-4e07-9aee-efa379244b2d","Type":"ContainerStarted","Data":"49f999c38b67668737ca5a892b7ea41db5118958c17e32053312705503b19c6f"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.069046 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" event={"ID":"d93f1bc3-9e18-4541-909a-7eb51a5fedd0","Type":"ContainerStarted","Data":"140ef9b9e74ff6485d126291c14bc51a0bd82d87e952396ae9882bd5770ef31d"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.080664 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" event={"ID":"d633942f-d083-44fd-b0e0-1dce9b0fdf0b","Type":"ContainerStarted","Data":"c51b829e1b6fd72dae19c12b7eb95cb8e99daf72ea7575e7955bf6935a8d620c"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.102950 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" event={"ID":"ad6570a1-7f62-46b4-87e8-6ddea76c4101","Type":"ContainerStarted","Data":"e0d5b14e46835cd94a176fb5302c75074304e0a7eeb5bb3bc36beeebf5735698"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.103382 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.104017 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.104730 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.107653 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.607626427 +0000 UTC m=+143.048270757 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.111720 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" podStartSLOduration=118.111703169 podStartE2EDuration="1m58.111703169s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:36.106658823 +0000 UTC m=+142.547303133" watchObservedRunningTime="2025-09-29 13:43:36.111703169 +0000 UTC m=+142.552347489" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.111890 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" event={"ID":"d4c75357-03b7-433e-a45a-422bb2f54337","Type":"ContainerStarted","Data":"88b8c8fc859cdc45adb0f48913f037aee57b1e004cce73eacc58487cedbc39d8"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.114315 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.120448 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" event={"ID":"a61b3f02-628b-4dbf-bebe-499b9ee8b96f","Type":"ContainerStarted","Data":"7072403c89420d27d8ff9867e24111350cdd8714bbca954a13f0cf5a56ae172e"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.128546 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" event={"ID":"2aa3898a-a2ad-4e76-8927-337e20298d02","Type":"ContainerStarted","Data":"80e674904a881b7707afe49bcc628983b52d9d962c8d74d159b0f424ccf7b9de"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.128600 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" event={"ID":"2aa3898a-a2ad-4e76-8927-337e20298d02","Type":"ContainerStarted","Data":"c1950f2b08957f0db37006d0140444ccace1d289970cb852f34a4707f941afb6"} Sep 29 13:43:36 crc kubenswrapper[4869]: 
I0929 13:43:36.130954 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.136636 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-2gjfj"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.145104 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" event={"ID":"5bca4dbe-67e9-4619-b821-5b187f159ed9","Type":"ContainerStarted","Data":"06a59b445bd5b9649874ffd64020229e0aa4a48bd03896e7971ce0c904f1bb28"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.147840 4869 generic.go:334] "Generic (PLEG): container finished" podID="cabc3961-9121-416f-962c-80558b14a820" containerID="a8f507fa9d9787157055c5bf6d575b12286fb208cd67639a751645db7052b5ef" exitCode=0 Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.147910 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" event={"ID":"cabc3961-9121-416f-962c-80558b14a820","Type":"ContainerDied","Data":"a8f507fa9d9787157055c5bf6d575b12286fb208cd67639a751645db7052b5ef"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.151174 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2ghwn" event={"ID":"e47976d3-2e57-4943-a744-75ef0accd1ec","Type":"ContainerStarted","Data":"51bd67a63ced80a8370e0fbbd32862e4c0dbd01aa769a586e2ecf1a0110b55f7"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.151217 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2ghwn" event={"ID":"e47976d3-2e57-4943-a744-75ef0accd1ec","Type":"ContainerStarted","Data":"824137d42bcd60ff59090b4d87a1448d752d31604372277cd8fed168be028f3d"} Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.152295 4869 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-vhbp8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.152340 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.172075 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.173086 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.173185 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: 
connect: connection refused" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.203580 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.209333 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.210806 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.710784729 +0000 UTC m=+143.151429269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: W0929 13:43:36.216446 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1728b99_11ce_48cd_990b_d68af8a3f006.slice/crio-129216fff26340445066f3743941795f0d8bb1c7b633ae5f3231191b51914b03 WatchSource:0}: Error finding container 129216fff26340445066f3743941795f0d8bb1c7b633ae5f3231191b51914b03: Status 404 returned error can't find the container with id 129216fff26340445066f3743941795f0d8bb1c7b633ae5f3231191b51914b03 Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.220253 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.311834 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.312036 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.812003983 +0000 UTC m=+143.252648303 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.312383 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.313040 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.813008378 +0000 UTC m=+143.253652698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.413316 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.414132 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:36.914110218 +0000 UTC m=+143.354754538 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.486300 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.515473 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.515887 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.015872591 +0000 UTC m=+143.456516911 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.521100 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.583233 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.622359 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.623791 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.123756647 +0000 UTC m=+143.564400977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.626273 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.627088 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.127066133 +0000 UTC m=+143.567710463 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.629310 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-qqgzh" podStartSLOduration=119.62928387 podStartE2EDuration="1m59.62928387s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:36.621419216 +0000 UTC m=+143.062063536" watchObservedRunningTime="2025-09-29 13:43:36.62928387 +0000 UTC m=+143.069928190" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.669363 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tph9m" podStartSLOduration=118.669342235 podStartE2EDuration="1m58.669342235s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:36.667134098 +0000 UTC m=+143.107778418" watchObservedRunningTime="2025-09-29 13:43:36.669342235 +0000 UTC m=+143.109986555" Sep 29 13:43:36 crc kubenswrapper[4869]: W0929 13:43:36.713134 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d9c83b7_6dd1_4be7_ba30_7d030eae6524.slice/crio-2968021ff448b960c2aa1b7b8beb27f6d475a693a6fa7721712b1cc786df80b9 WatchSource:0}: Error finding container 2968021ff448b960c2aa1b7b8beb27f6d475a693a6fa7721712b1cc786df80b9: Status 404 returned error can't find the container with id 2968021ff448b960c2aa1b7b8beb27f6d475a693a6fa7721712b1cc786df80b9 Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.729289 
4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.729953 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.229928694 +0000 UTC m=+143.670573024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.834084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.834564 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.334546416 +0000 UTC m=+143.775190736 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.880585 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-m2zpm" podStartSLOduration=118.880560579 podStartE2EDuration="1m58.880560579s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:36.82574996 +0000 UTC m=+143.266394280" watchObservedRunningTime="2025-09-29 13:43:36.880560579 +0000 UTC m=+143.321204899" Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.905011 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw"] Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.934480 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:36 crc kubenswrapper[4869]: E0929 13:43:36.934953 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.434938082 +0000 UTC m=+143.875582402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:36 crc kubenswrapper[4869]: I0929 13:43:36.968978 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t6hkv"] Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.036788 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.037190 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.537175942 +0000 UTC m=+143.977820252 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.137921 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.138651 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.638630024 +0000 UTC m=+144.079274334 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.191783 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 29 13:43:37 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld Sep 29 13:43:37 crc kubenswrapper[4869]: [+]process-running ok Sep 29 13:43:37 crc kubenswrapper[4869]: healthz check failed Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.191891 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.241846 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.242339 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.742323724 +0000 UTC m=+144.182968044 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.344279 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.344924 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.844894206 +0000 UTC m=+144.285538526 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.365072 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" event={"ID":"a61b3f02-628b-4dbf-bebe-499b9ee8b96f","Type":"ContainerStarted","Data":"9ddadc2b54915652e2d97c83af0beb70e293ce732573190ac3c25f3f07352e6b"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.366365 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.390973 4869 patch_prober.go:28] interesting pod/console-operator-58897d9998-t7kkw container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.391441 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" podUID="a61b3f02-628b-4dbf-bebe-499b9ee8b96f" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.465958 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" event={"ID":"29edf770-4392-4e07-9aee-efa379244b2d","Type":"ContainerStarted","Data":"c84ca1e83ec511852fddf84b75a5229e6f93c47eeca9ee9b170d6f2be328c133"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.467972 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.469791 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:37.969774434 +0000 UTC m=+144.410418754 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.487500 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zkprl" podStartSLOduration=119.48747296 podStartE2EDuration="1m59.48747296s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.391879221 +0000 UTC m=+143.832523551" watchObservedRunningTime="2025-09-29 13:43:37.48747296 +0000 UTC m=+143.928117280" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.571406 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.572882 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.072863873 +0000 UTC m=+144.513508193 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.573695 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" event={"ID":"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed","Type":"ContainerStarted","Data":"771af9f898368ebe4ca4c96f1235f6b9e70e4a25f40799f78359ace5f655e7ce"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.575434 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lnc9f" podStartSLOduration=120.575406181 podStartE2EDuration="2m0.575406181s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.489463919 +0000 UTC m=+143.930108239" watchObservedRunningTime="2025-09-29 13:43:37.575406181 +0000 UTC m=+144.016050501" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.634167 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" event={"ID":"78378d8c-602a-4af2-ad85-c1e4330e959e","Type":"ContainerStarted","Data":"a03f5dc545b21d258b197a7cddebf49e5a62276387c0f71e1383576d9450535a"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.636105 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-55q6h" podStartSLOduration=120.636094384 podStartE2EDuration="2m0.636094384s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.634765058 +0000 UTC m=+144.075409398" watchObservedRunningTime="2025-09-29 13:43:37.636094384 +0000 UTC m=+144.076738704" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.639764 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-2ghwn" podStartSLOduration=119.639755472 podStartE2EDuration="1m59.639755472s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.574192209 +0000 UTC m=+144.014836519" watchObservedRunningTime="2025-09-29 13:43:37.639755472 +0000 UTC m=+144.080399792" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.663969 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" event={"ID":"7db5686b-2de9-4e9b-99cc-80924f2c110a","Type":"ContainerStarted","Data":"b21ce78b5a1b97d1fb872b509f0b3714bf428590426c131d9728618eb6b5dfeb"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.676445 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.676834 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.176818932 +0000 UTC m=+144.617463252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.710122 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wfwrw" podStartSLOduration=119.710096041 podStartE2EDuration="1m59.710096041s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.709327984 +0000 UTC m=+144.149972314" watchObservedRunningTime="2025-09-29 13:43:37.710096041 +0000 UTC m=+144.150740351" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.714898 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" event={"ID":"63169d28-deb7-44a8-afd2-48f4fbb6fa69","Type":"ContainerStarted","Data":"98f4b0a4f42ea4f1ab6c130e92bd9b0976be23397ea6a41f476dc975fc6693a9"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.763078 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" podStartSLOduration=119.763033904 podStartE2EDuration="1m59.763033904s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.751025616 +0000 UTC m=+144.191669956" watchObservedRunningTime="2025-09-29 13:43:37.763033904 +0000 UTC m=+144.203678224" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.775986 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" event={"ID":"f1728b99-11ce-48cd-990b-d68af8a3f006","Type":"ContainerStarted","Data":"129216fff26340445066f3743941795f0d8bb1c7b633ae5f3231191b51914b03"} Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.777108 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.777332 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.277298201 +0000 UTC m=+144.717942521 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.778046 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.778069 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5hgz7" podStartSLOduration=119.778037156 podStartE2EDuration="1m59.778037156s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.776402899 +0000 UTC m=+144.217047219" watchObservedRunningTime="2025-09-29 13:43:37.778037156 +0000 UTC m=+144.218681476" Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.780423 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.280403089 +0000 UTC m=+144.721047409 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.784712 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" event={"ID":"773d63e6-d34c-4320-8cab-c77b91b3c8b2","Type":"ContainerStarted","Data":"abfefd990c064113287c76a2ed1a9122276e679288ec949c8fdf814e1b40b35c"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.811658 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k"]
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.811701 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" event={"ID":"cdc548b0-0411-4ae7-a8bf-df1d9808bf0d","Type":"ContainerStarted","Data":"87e3785a78f7d91f959cb1b82d596eca99396b4cc194ed5180d6e53d7939fc7f"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.813723 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rxvg9" event={"ID":"3d9c83b7-6dd1-4be7-ba30-7d030eae6524","Type":"ContainerStarted","Data":"2968021ff448b960c2aa1b7b8beb27f6d475a693a6fa7721712b1cc786df80b9"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.817555 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c76cl"]
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.834493 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-k7lp4"]
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.862988 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj"]
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.865888 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" event={"ID":"7cdb33ce-d579-40e7-934b-94666c8e7c27","Type":"ContainerStarted","Data":"204a7bb02b43eb45ae9958d01908259c21d7d6b198eb532b4fe883907a4b3920"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.879742 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" podStartSLOduration=120.879710566 podStartE2EDuration="2m0.879710566s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.870444534 +0000 UTC m=+144.311088854" watchObservedRunningTime="2025-09-29 13:43:37.879710566 +0000 UTC m=+144.320354886"
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.880951 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.882334 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" event={"ID":"b5d3f667-2064-442a-a0a7-899f35a00d9f","Type":"ContainerStarted","Data":"c0dcf5bbc744051e53018a7954d66743c8c192451f18b67f263aa44cc6a07883"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.883675 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4"]
Sep 29 13:43:37 crc kubenswrapper[4869]: E0929 13:43:37.883782 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.383759707 +0000 UTC m=+144.824404097 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.888670 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" event={"ID":"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a","Type":"ContainerStarted","Data":"144f86b24e5ca87cd434bbfb72ceb81aab62608a9b306a5f50c25d3ce4dd15f5"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.901223 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9"]
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.906199 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" event={"ID":"eb0bf99c-6af7-48b3-b415-016a751f526e","Type":"ContainerStarted","Data":"7d6ac268a12b48aa45dab22b8307464787f0659f7d1947e4e06d75fdfe75da1e"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.907888 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" event={"ID":"2d76b2fd-25de-456f-9217-f4585285248c","Type":"ContainerStarted","Data":"6b540b0210ba147459ed33a4fb900903df5db9d7e18026f9da73b7a8570873a3"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.956832 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b8gtg" event={"ID":"41996eeb-6d4e-45e1-b140-f2ff7f0bec29","Type":"ContainerStarted","Data":"896f621553f46b3ebaf14e0377ff4b2b32013874d381270782310c35b991603a"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.956871 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b8gtg" event={"ID":"41996eeb-6d4e-45e1-b140-f2ff7f0bec29","Type":"ContainerStarted","Data":"80c36b0e2d5184ed932e48be374c1a39b4641b06cb5b97e003a2b02124f996fd"}
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.970049 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8"
Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.985367 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:37 crc kubenswrapper[4869]: I0929 13:43:37.992505 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-b8gtg" podStartSLOduration=119.992485923 podStartE2EDuration="1m59.992485923s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:37.991578581 +0000 UTC m=+144.432222911" watchObservedRunningTime="2025-09-29 13:43:37.992485923 +0000 UTC m=+144.433130243" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.007009 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.506968307 +0000 UTC m=+144.947612627 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: W0929 13:43:38.045029 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25b39162_f5d9_4bef_90e5_e3ed59225936.slice/crio-852c8542f2e7737dab4c57de78773d69c45c78e27ff5c3d2b905f5ca457d29d6 WatchSource:0}: Error finding container 852c8542f2e7737dab4c57de78773d69c45c78e27ff5c3d2b905f5ca457d29d6: Status 404 returned error can't find the container with id 852c8542f2e7737dab4c57de78773d69c45c78e27ff5c3d2b905f5ca457d29d6 Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.078281 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc"] Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.086794 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.087149 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.587091657 +0000 UTC m=+145.027735977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.087511 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.087925 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.587918576 +0000 UTC m=+145.028562896 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.112111 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6"] Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.174478 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 29 13:43:38 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld Sep 29 13:43:38 crc kubenswrapper[4869]: [+]process-running ok Sep 29 13:43:38 crc kubenswrapper[4869]: healthz check failed Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.174540 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.189236 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.189676 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.689657028 +0000 UTC m=+145.130301348 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.290647 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.291018 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.791003436 +0000 UTC m=+145.231647756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.393168 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.393552 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.893527636 +0000 UTC m=+145.334171956 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.495102 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.495998 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:38.995983193 +0000 UTC m=+145.436627513 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.597014 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.597148 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.097127765 +0000 UTC m=+145.537772085 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.597544 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.597882 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.097875021 +0000 UTC m=+145.538519341 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.698704 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.698922 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.198885118 +0000 UTC m=+145.639529438 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.698979 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.699285 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.199269741 +0000 UTC m=+145.639914061 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.800509 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.800664 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.300642281 +0000 UTC m=+145.741286601 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.800906 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.801428 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.301402857 +0000 UTC m=+145.742047237 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.901735 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.902331 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.40229703 +0000 UTC m=+145.842941350 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.902737 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:38 crc kubenswrapper[4869]: E0929 13:43:38.903301 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.403271304 +0000 UTC m=+145.843915824 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.989113 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" event={"ID":"cabc3961-9121-416f-962c-80558b14a820","Type":"ContainerStarted","Data":"8976ae581d41307b8ec3b5991fe35b581d1028092504038100aab35ea32656ee"} Sep 29 13:43:38 crc kubenswrapper[4869]: I0929 13:43:38.990586 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.003264 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.003322 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" event={"ID":"78378d8c-602a-4af2-ad85-c1e4330e959e","Type":"ContainerStarted","Data":"feebe9ccc6eb95c20998c1a7c75ef1fcb4d58bb6dc8c8b40e3d7255b1cbd7137"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.003866 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.004301 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.004663 4869 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.504632613 +0000 UTC m=+145.945277133 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.036226 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" event={"ID":"d831b6b1-9e16-4bd3-88f7-7bed5f73206f","Type":"ContainerStarted","Data":"755446dda5837573fe162e8ac038b9c4ef81e4f75df79d179c77053c461f2243"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.040620 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" event={"ID":"63169d28-deb7-44a8-afd2-48f4fbb6fa69","Type":"ContainerStarted","Data":"152f23cd75fb235bbcda66e22210123d9fce3cc94084969734ca45f728193f3e"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.041867 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" event={"ID":"b5d3f667-2064-442a-a0a7-899f35a00d9f","Type":"ContainerStarted","Data":"53eb16da6b28c9281f2ae0c44673f0921b0abc5792782c94986814c1d33545d9"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.044304 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" event={"ID":"5f4028e9-fd1c-44d6-9811-43ef6e558655","Type":"ContainerStarted","Data":"30002f76aa153f351a994ed085298b9f8097d272f47053b7cbddeb3e35fc5e11"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.054816 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" event={"ID":"88d471bd-e253-4961-b6a3-4da4d15f7bc9","Type":"ContainerStarted","Data":"2114ad618e9e76688adc998f70db960da8b6b0169b594d0392558980ea2ee7b7"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.083964 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zk45b" event={"ID":"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac","Type":"ContainerStarted","Data":"56fe23f7942b64ba002fd150b82e91697ee0816cf0c699d14dcdf9d769bcb36e"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.107714 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.110594 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.113122 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" event={"ID":"ecf75dd0-8b7e-4a13-8658-cb848ee6aeed","Type":"ContainerStarted","Data":"71a719772468a21bb26f9b295a28f25f46b17fcfb24f6b764165788d53b11f6f"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.116023 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc" podStartSLOduration=122.11600709 podStartE2EDuration="2m2.11600709s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.025716047 +0000 UTC m=+145.466360367" watchObservedRunningTime="2025-09-29 13:43:39.11600709 +0000 UTC m=+145.556651410"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.169559 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-bzpd6" podStartSLOduration=121.169538404 podStartE2EDuration="2m1.169538404s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.115104168 +0000 UTC m=+145.555748488" watchObservedRunningTime="2025-09-29 13:43:39.169538404 +0000 UTC m=+145.610182714"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.169827 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s6jkm" podStartSLOduration=121.169822754 podStartE2EDuration="2m1.169822754s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.168385284 +0000 UTC m=+145.609029604" watchObservedRunningTime="2025-09-29 13:43:39.169822754 +0000 UTC m=+145.610467074"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.178636 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" event={"ID":"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef","Type":"ContainerStarted","Data":"cb6c3b7308f668541ed5fc838f7ec704aa66599e217f842f58bec865b2b70a8c"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.190567 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 29 13:43:39 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]process-running ok
Sep 29 13:43:39 crc kubenswrapper[4869]: healthz check failed
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.190662 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.210917 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.211706 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.711686681 +0000 UTC m=+146.152331011 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.219509 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" event={"ID":"d4c75357-03b7-433e-a45a-422bb2f54337","Type":"ContainerStarted","Data":"ddfc9eb01086470597b7b28bab26e2e5790161bc472be5825c771446b73e6998"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.240363 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" event={"ID":"80fc78d5-95e1-4be9-bb44-52692b4409db","Type":"ContainerStarted","Data":"78427f74a3aa313362608178f4ab91655b6d7cee4fa89a4b81514b1604b08096"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.299893 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" event={"ID":"25b39162-f5d9-4bef-90e5-e3ed59225936","Type":"ContainerStarted","Data":"852c8542f2e7737dab4c57de78773d69c45c78e27ff5c3d2b905f5ca457d29d6"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.308526 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-pq5fc" podStartSLOduration=121.308493382 podStartE2EDuration="2m1.308493382s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.289103037 +0000 UTC m=+145.729747357" watchObservedRunningTime="2025-09-29 13:43:39.308493382 +0000 UTC m=+145.749137692"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.311803 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-k7lp4" event={"ID":"1995f69c-86b1-47cc-b76e-69bc593e6520","Type":"ContainerStarted","Data":"7a645f427ac943508ecaaa8a590d0344777f18a3329a9577d46451471b0734b9"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.328298 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.334402 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.834383543 +0000 UTC m=+146.275027863 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.335870 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" event={"ID":"eb0bf99c-6af7-48b3-b415-016a751f526e","Type":"ContainerStarted","Data":"82b151236d3a9a959dbb110192b359ae2d6c84d9865dac36cd3c32a053602367"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.336829 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.339635 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" event={"ID":"d8cbc415-fef2-48a6-b66e-2afe6cd3be27","Type":"ContainerStarted","Data":"b52a08c922e752f191728def6706467baeb02fb2788e4bbb8368d4014f3885b8"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.342072 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rxvg9" event={"ID":"3d9c83b7-6dd1-4be7-ba30-7d030eae6524","Type":"ContainerStarted","Data":"06b8b7a47217fb043287e26f0126e6abd98b60be32ef6ee293fdedf494eaa356"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.354095 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" event={"ID":"82aa7000-191d-49ff-8a4c-e6f49b4946c3","Type":"ContainerStarted","Data":"ec8b631ccd36f52995ce8933866893add9378257f3b7fa69df676d26a2dbddd1"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.354162 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" event={"ID":"82aa7000-191d-49ff-8a4c-e6f49b4946c3","Type":"ContainerStarted","Data":"5b6063fd28639423695517602945058fc0e37d41301ba4a3e0539848259a1502"}
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.360056 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.370978 4869 patch_prober.go:28] interesting pod/apiserver-76f77b778f-s2kxs container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]log ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]etcd ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/start-apiserver-admission-initializer ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/generic-apiserver-start-informers ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/max-in-flight-filter ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/storage-object-count-tracker-hook ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Sep 29 13:43:39 crc kubenswrapper[4869]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/project.openshift.io-projectcache ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-startinformers ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/openshift.io-restmapperupdater ok
Sep 29 13:43:39 crc kubenswrapper[4869]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Sep 29 13:43:39 crc kubenswrapper[4869]: livez check failed
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.371068 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs" podUID="cdc548b0-0411-4ae7-a8bf-df1d9808bf0d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.384594 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" podStartSLOduration=121.384572461 podStartE2EDuration="2m1.384572461s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.381166422 +0000 UTC m=+145.821810742" watchObservedRunningTime="2025-09-29 13:43:39.384572461 +0000 UTC m=+145.825216781"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.400794 4869 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-6w87k container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.400833 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" podUID="82aa7000-191d-49ff-8a4c-e6f49b4946c3" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.401072 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" event={"ID":"d633942f-d083-44fd-b0e0-1dce9b0fdf0b","Type":"ContainerStarted","Data":"bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65"}
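The probe entries above follow one shape: an HTTP GET against the container's health endpoint, any connection error or status outside the success range counted as failure, and the start of the response body (the [+]/[-] check lines) echoed into the log. A self-contained Go sketch of that pattern; the URL is taken from the downloads probe in this log, and the 200-399 success range mirrors common kubelet probe semantics rather than being copied from kubelet source:

// httpprobe.go: minimal HTTP health probe in the style of prober.go output.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "dial tcp 10.217.0.12:8080: connect: connection refused"
		return err
	}
	defer resp.Body.Close()
	// Keep only the start of the body, like the start-of-body= field above.
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, body)
	}
	return nil
}

func main() {
	if err := probe("http://10.217.0.12:8080/"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}

A 500 with a body of failed checks produces the router and apiserver lines above; a refused connection produces the olm-operator and oauth-openshift lines that follow.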
event={"ID":"d633942f-d083-44fd-b0e0-1dce9b0fdf0b","Type":"ContainerStarted","Data":"bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.401458 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.416585 4869 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-7467k container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.416687 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.430299 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.432648 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:39.932625644 +0000 UTC m=+146.373269964 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.450201 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-k2q4s" podStartSLOduration=121.450184685 podStartE2EDuration="2m1.450184685s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.449401878 +0000 UTC m=+145.890046198" watchObservedRunningTime="2025-09-29 13:43:39.450184685 +0000 UTC m=+145.890829005" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.488813 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" event={"ID":"bf20c45d-df7c-4122-a218-630d563e72c8","Type":"ContainerStarted","Data":"dbb42f55d29785751b1ff88ae95a6d75b8e3c85125866f732d9e9c5d59b9fa04"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.508069 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-k7lp4" podStartSLOduration=7.5080506 podStartE2EDuration="7.5080506s" podCreationTimestamp="2025-09-29 13:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.507147348 +0000 UTC m=+145.947791668" watchObservedRunningTime="2025-09-29 13:43:39.5080506 +0000 UTC m=+145.948694920" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.532185 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.532394 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" event={"ID":"2d76b2fd-25de-456f-9217-f4585285248c","Type":"ContainerStarted","Data":"c750b1694bf7a2f1da5ff17184316952693aefdd9694fd0dff379b0856ad7b95"} Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.532572 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.032558043 +0000 UTC m=+146.473202363 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.558452 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" event={"ID":"d93f1bc3-9e18-4541-909a-7eb51a5fedd0","Type":"ContainerStarted","Data":"2bf7bd6d1b256e3a8e4816291d06ba3db5841deaafd383665211591316290166"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.558815 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rxvg9" podStartSLOduration=7.558794247 podStartE2EDuration="7.558794247s" podCreationTimestamp="2025-09-29 13:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.557011064 +0000 UTC m=+145.997655404" watchObservedRunningTime="2025-09-29 13:43:39.558794247 +0000 UTC m=+145.999438567" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.558921 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.572565 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" event={"ID":"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886","Type":"ContainerStarted","Data":"e5ddcb3e96f662410a53a52d2f4b92a16a5dc8d51cb10ec88122500d7f4b20d7"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.574395 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.574586 4869 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-7trbl container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.29:6443/healthz\": dial tcp 10.217.0.29:6443: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.574643 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.29:6443/healthz\": dial tcp 10.217.0.29:6443: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.575581 4869 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-fb4q6 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.575637 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" podUID="74ca8d3d-03dd-47e5-8d58-ded2cb2f9886" containerName="catalog-operator" probeResult="failure" output="Get 
\"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.617210 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" event={"ID":"7cdb33ce-d579-40e7-934b-94666c8e7c27","Type":"ContainerStarted","Data":"cc85e2dc3639d6920ad272339bcd368e56c70e6c8898bc8af26d878730ce9794"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.622447 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" podStartSLOduration=121.622412302 podStartE2EDuration="2m1.622412302s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.602739577 +0000 UTC m=+146.043383897" watchObservedRunningTime="2025-09-29 13:43:39.622412302 +0000 UTC m=+146.063056622" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.633056 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.633327 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.13328934 +0000 UTC m=+146.573933660 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.633474 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.636022 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.135985354 +0000 UTC m=+146.576629854 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.640781 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7" podStartSLOduration=121.64076285 podStartE2EDuration="2m1.64076285s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.623104296 +0000 UTC m=+146.063748646" watchObservedRunningTime="2025-09-29 13:43:39.64076285 +0000 UTC m=+146.081407170" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.666734 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" event={"ID":"41fc0fbf-c0c9-431f-9f56-ef4296e6b25a","Type":"ContainerStarted","Data":"6b278f59ac8e31e17eaf60265ec10d7248785657735ced8418ae5bf5a7aae2a1"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.674117 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.690813 4869 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8lrvm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:5443/healthz\": dial tcp 10.217.0.17:5443: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.690894 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" podUID="41fc0fbf-c0c9-431f-9f56-ef4296e6b25a" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.17:5443/healthz\": dial tcp 10.217.0.17:5443: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.691241 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" event={"ID":"ad6570a1-7f62-46b4-87e8-6ddea76c4101","Type":"ContainerStarted","Data":"68dbf10737cf0d926f39fb2036cd81a58a62db20bd94779178ad0459b21121a5"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.691296 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" event={"ID":"ad6570a1-7f62-46b4-87e8-6ddea76c4101","Type":"ContainerStarted","Data":"a9602c0e25bed123fbc7453703ddd35d90f9bfb8b36a108591865eafe1e4efc5"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.709355 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" event={"ID":"773d63e6-d34c-4320-8cab-c77b91b3c8b2","Type":"ContainerStarted","Data":"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc"} Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.709422 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.709914 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b8gtg" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.715767 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bs57s" podStartSLOduration=122.715734971 podStartE2EDuration="2m2.715734971s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.705888628 +0000 UTC m=+146.146532958" watchObservedRunningTime="2025-09-29 13:43:39.715734971 +0000 UTC m=+146.156379291" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.715934 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" podStartSLOduration=122.715926147 podStartE2EDuration="2m2.715926147s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.674377581 +0000 UTC m=+146.115021921" watchObservedRunningTime="2025-09-29 13:43:39.715926147 +0000 UTC m=+146.156570487" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.717417 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.717473 4869 patch_prober.go:28] interesting pod/console-operator-58897d9998-t7kkw container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.717509 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.717537 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" podUID="a61b3f02-628b-4dbf-bebe-499b9ee8b96f" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.725790 4869 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bwxhh container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.725882 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" probeResult="failure" output="Get 
\"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.735078 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.737226 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.237193118 +0000 UTC m=+146.677837438 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.761357 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" podStartSLOduration=121.761331828 podStartE2EDuration="2m1.761331828s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.760750288 +0000 UTC m=+146.201394608" watchObservedRunningTime="2025-09-29 13:43:39.761331828 +0000 UTC m=+146.201976148" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.845300 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.846097 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.346082679 +0000 UTC m=+146.786726999 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.866426 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" podStartSLOduration=121.866404697 podStartE2EDuration="2m1.866404697s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.865529596 +0000 UTC m=+146.306173916" watchObservedRunningTime="2025-09-29 13:43:39.866404697 +0000 UTC m=+146.307049017" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.866946 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" podStartSLOduration=121.866937045 podStartE2EDuration="2m1.866937045s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.828673333 +0000 UTC m=+146.269317663" watchObservedRunningTime="2025-09-29 13:43:39.866937045 +0000 UTC m=+146.307581365" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.942385 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-k2ml4" podStartSLOduration=121.942352421 podStartE2EDuration="2m1.942352421s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.898336778 +0000 UTC m=+146.338981098" watchObservedRunningTime="2025-09-29 13:43:39.942352421 +0000 UTC m=+146.382996741" Sep 29 13:43:39 crc kubenswrapper[4869]: I0929 13:43:39.949922 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:39 crc kubenswrapper[4869]: E0929 13:43:39.950389 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.45036745 +0000 UTC m=+146.891011770 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.059260 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.059715 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.559701917 +0000 UTC m=+147.000346237 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.160173 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.160364 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.66032718 +0000 UTC m=+147.100971500 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.160484 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.160941 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.660923961 +0000 UTC m=+147.101568281 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.174787 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 29 13:43:40 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld Sep 29 13:43:40 crc kubenswrapper[4869]: [+]process-running ok Sep 29 13:43:40 crc kubenswrapper[4869]: healthz check failed Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.175204 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.261307 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.261878 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.761852265 +0000 UTC m=+147.202496585 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.384455 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.385186 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.885166099 +0000 UTC m=+147.325810419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.486312 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.486780 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:40.986758186 +0000 UTC m=+147.427402506 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.587954 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.588731 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.088716386 +0000 UTC m=+147.529360706 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.689924 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.690393 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.190367025 +0000 UTC m=+147.631011345 (durationBeforeRetry 500ms). 
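
The repeating MountVolume/UnmountVolume failures above share one root cause: the kubelet is asked to mount the hostpath PVC for image-registry-697d97f7c8-cl5ps and tear it down for pod 8f668bae-612b-4b75-9490-919e737c6a3b before the kubevirt.io.hostpath-provisioner CSI driver has registered with it; the csi-hostpathplugin pod's own containers are only starting at 13:43:40-41 in the entries below. Each failed operation is re-queued with a fixed delay, logged as "durationBeforeRetry 500ms", and should succeed once the driver appears in the kubelet's plugin registry. A minimal Go sketch of that retry-until-registered pattern (illustrative only, not kubelet source; all names are hypothetical):

    package main

    import (
        "fmt"
        "time"
    )

    // driverRegistered stands in for the kubelet's CSI plugin-registry lookup.
    // Here the driver "registers" two seconds after start, roughly as
    // csi-hostpathplugin eventually does in this log.
    func driverRegistered(registeredAt time.Time) bool {
        return time.Now().After(registeredAt)
    }

    func main() {
        const driver = "kubevirt.io.hostpath-provisioner"
        const retryDelay = 500 * time.Millisecond
        registeredAt := time.Now().Add(2 * time.Second)

        for attempt := 1; ; attempt++ {
            if driverRegistered(registeredAt) {
                fmt.Printf("attempt %d: MountDevice succeeded\n", attempt)
                return
            }
            // Mirrors: "No retries permitted until ... (durationBeforeRetry 500ms)"
            fmt.Printf("attempt %d: driver name %s not found in the list of registered CSI drivers; retrying in %v\n",
                attempt, driver, retryDelay)
            time.Sleep(retryDelay)
        }
    }
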
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.715712 4869 generic.go:334] "Generic (PLEG): container finished" podID="7cdb33ce-d579-40e7-934b-94666c8e7c27" containerID="cc85e2dc3639d6920ad272339bcd368e56c70e6c8898bc8af26d878730ce9794" exitCode=0 Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.715791 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" event={"ID":"7cdb33ce-d579-40e7-934b-94666c8e7c27","Type":"ContainerDied","Data":"cc85e2dc3639d6920ad272339bcd368e56c70e6c8898bc8af26d878730ce9794"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.715823 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" event={"ID":"7cdb33ce-d579-40e7-934b-94666c8e7c27","Type":"ContainerStarted","Data":"320182bf362311346a9540a1c643adb5e7878d728cfea3a7da9b53f586ff55bc"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.718215 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-k7lp4" event={"ID":"1995f69c-86b1-47cc-b76e-69bc593e6520","Type":"ContainerStarted","Data":"bc8246a78a043933f50390c8b9feb677e077490f18dcf4f72e058d14aa9ecee7"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.720083 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jc6l9" event={"ID":"25b39162-f5d9-4bef-90e5-e3ed59225936","Type":"ContainerStarted","Data":"0c7a7c9739417471d67afbf504dcdd854e74c2d4b5b2c8cef128e6019ddd3ed5"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.721971 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" event={"ID":"f1728b99-11ce-48cd-990b-d68af8a3f006","Type":"ContainerStarted","Data":"d4e05941552369d2d16ccecffa725188e27e2f13d5539e2939e6e9f4dc974261"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.724050 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" event={"ID":"74ca8d3d-03dd-47e5-8d58-ded2cb2f9886","Type":"ContainerStarted","Data":"c44eb719034550f6a6f94f15748bb35c64f25db97772d9adfbb71a1f0cd5de26"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.724919 4869 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-fb4q6 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.724957 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6" podUID="74ca8d3d-03dd-47e5-8d58-ded2cb2f9886" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.726359 
4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" event={"ID":"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef","Type":"ContainerStarted","Data":"e68a0968eec4d344e150053f2f923867667cb3a363b3087247dd81a79939e3cb"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.726385 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" event={"ID":"2df1a8bf-0930-4766-8fa1-e4f9d1cf60ef","Type":"ContainerStarted","Data":"f7e63bf3a9f5ea43c76dbcaf3c915074532c38f55af821131614d9fc6c43207f"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.729114 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" event={"ID":"88d471bd-e253-4961-b6a3-4da4d15f7bc9","Type":"ContainerStarted","Data":"0c50b51ec71601447b0348a889e4f777e81b413498502bcd465c2e4a588ba7dd"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.732048 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" event={"ID":"63169d28-deb7-44a8-afd2-48f4fbb6fa69","Type":"ContainerStarted","Data":"ba1c09630908bac29f680b95fadb6a0abbdb174699b22153ef77eae1accc970e"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.735146 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" event={"ID":"80fc78d5-95e1-4be9-bb44-52692b4409db","Type":"ContainerStarted","Data":"3a9439ea2560818e99a8e2dc09ed6d4ea9e5b38333bf75029be62fb64ddc0e72"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.735177 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" event={"ID":"80fc78d5-95e1-4be9-bb44-52692b4409db","Type":"ContainerStarted","Data":"d5f4108812d73dc210ef0ad59b32963128e5bcbadd19ef2980c73ea1a1599655"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.737288 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" event={"ID":"d831b6b1-9e16-4bd3-88f7-7bed5f73206f","Type":"ContainerStarted","Data":"9479c97b4434f620057414013f2760510efbf0a2e43770f38169961cf760f17d"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.739317 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zk45b" event={"ID":"3c5dfe0a-0cc0-4941-8be4-48a3ee6b14ac","Type":"ContainerStarted","Data":"5d4cb59d63ef6ee64063cba3ab94b26f88d177e9dec52d229d13a2cda3fdde5c"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.739741 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.741453 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" event={"ID":"bf20c45d-df7c-4122-a218-630d563e72c8","Type":"ContainerStarted","Data":"20767f9c4a335d24351d90454774f72dfdbe0fbb1fccacfa7a8e660af5ce2475"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.741480 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" event={"ID":"bf20c45d-df7c-4122-a218-630d563e72c8","Type":"ContainerStarted","Data":"bbbfe571b18e2c41db00120c5cb3635b96ad1140411037da1dc7f975703e0b7f"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 
13:43:40.743931 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" event={"ID":"5f4028e9-fd1c-44d6-9811-43ef6e558655","Type":"ContainerStarted","Data":"4bd6203f701429635cbf7096ce66de4e30f5c04ffe25ac2dd2f834ed0061cd9d"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.746805 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" event={"ID":"78378d8c-602a-4af2-ad85-c1e4330e959e","Type":"ContainerStarted","Data":"8fe03d06571c0f516e4f3a4f5f105d020886af2a35ea9ca258738855e6917db7"} Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.750853 4869 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-6w87k container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.750932 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k" podUID="82aa7000-191d-49ff-8a4c-e6f49b4946c3" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.751753 4869 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bwxhh container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.751799 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.752546 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.752639 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.785785 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" podStartSLOduration=122.785764796 podStartE2EDuration="2m2.785764796s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.7832765 +0000 UTC m=+147.223920820" watchObservedRunningTime="2025-09-29 13:43:40.785764796 +0000 UTC m=+147.226409116" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.786713 4869 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" podStartSLOduration=122.786707399 podStartE2EDuration="2m2.786707399s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:39.940009539 +0000 UTC m=+146.380653859" watchObservedRunningTime="2025-09-29 13:43:40.786707399 +0000 UTC m=+147.227351719" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.791993 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.793057 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.29304316 +0000 UTC m=+147.733687480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.817358 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-t7kkw" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.852176 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qvmsj" podStartSLOduration=122.852157958 podStartE2EDuration="2m2.852157958s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.850834932 +0000 UTC m=+147.291479252" watchObservedRunningTime="2025-09-29 13:43:40.852157958 +0000 UTC m=+147.292802278" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.852804 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-c76cl" podStartSLOduration=122.85279933 podStartE2EDuration="2m2.85279933s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.827904644 +0000 UTC m=+147.268548964" watchObservedRunningTime="2025-09-29 13:43:40.85279933 +0000 UTC m=+147.293443650" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.872328 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zszgw" podStartSLOduration=122.872305309 podStartE2EDuration="2m2.872305309s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.872174925 +0000 UTC m=+147.312819245" watchObservedRunningTime="2025-09-29 13:43:40.872305309 +0000 UTC m=+147.312949629" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.895038 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.896572 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.396549034 +0000 UTC m=+147.837193354 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.897720 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:40 crc kubenswrapper[4869]: E0929 13:43:40.898656 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.398646567 +0000 UTC m=+147.839290887 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.910379 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5swj" podStartSLOduration=122.910356894 podStartE2EDuration="2m2.910356894s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.908005512 +0000 UTC m=+147.348649832" watchObservedRunningTime="2025-09-29 13:43:40.910356894 +0000 UTC m=+147.351001214" Sep 29 13:43:40 crc kubenswrapper[4869]: I0929 13:43:40.999784 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:40.999971 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.499924413 +0000 UTC m=+147.940568733 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.000116 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.000508 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.500497433 +0000 UTC m=+147.941141763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.009499 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-zk45b" podStartSLOduration=9.009476965 podStartE2EDuration="9.009476965s" podCreationTimestamp="2025-09-29 13:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:40.959086321 +0000 UTC m=+147.399730651" watchObservedRunningTime="2025-09-29 13:43:41.009476965 +0000 UTC m=+147.450121285" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.010553 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" podStartSLOduration=124.010545563 podStartE2EDuration="2m4.010545563s" podCreationTimestamp="2025-09-29 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:41.009726994 +0000 UTC m=+147.450371334" watchObservedRunningTime="2025-09-29 13:43:41.010545563 +0000 UTC m=+147.451189883" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.101647 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.101875 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.601837671 +0000 UTC m=+148.042481991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.102036 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.102449 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.602432642 +0000 UTC m=+148.043076962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.136590 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xnzv4" podStartSLOduration=123.13656965 podStartE2EDuration="2m3.13656965s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:41.076199318 +0000 UTC m=+147.516843638" watchObservedRunningTime="2025-09-29 13:43:41.13656965 +0000 UTC m=+147.577213970" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.168035 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-p52tc" podStartSLOduration=123.168014675 podStartE2EDuration="2m3.168014675s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:41.139076098 +0000 UTC m=+147.579720428" watchObservedRunningTime="2025-09-29 13:43:41.168014675 +0000 UTC m=+147.608658995" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.203930 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.204475 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.704455344 +0000 UTC m=+148.145099664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.216317 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 29 13:43:41 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld Sep 29 13:43:41 crc kubenswrapper[4869]: [+]process-running ok Sep 29 13:43:41 crc kubenswrapper[4869]: healthz check failed Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.216378 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.283389 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-t6hkv" podStartSLOduration=123.283366201 podStartE2EDuration="2m3.283366201s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:41.209131997 +0000 UTC m=+147.649776327" watchObservedRunningTime="2025-09-29 13:43:41.283366201 +0000 UTC m=+147.724010511" Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.308760 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.309177 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.80916 +0000 UTC m=+148.249804320 (durationBeforeRetry 500ms). 
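
The router startup-probe output above is the conventional Kubernetes healthz format: one "[-]" or "[+]" line per named sub-check followed by an overall verdict, with the endpoint returning HTTP 500 while any sub-check fails (here "backend-http" and "has-synced" are still failing, "process-running" passes). A hedged Go sketch of an aggregating healthz handler that produces this shape; the check names come from the log, the handler itself is illustrative:

    package main

    import (
        "fmt"
        "log"
        "net/http"
    )

    // check is a named health sub-check, mirroring the [-]/[+] lines above.
    type check struct {
        name string
        fn   func() error
    }

    func healthzHandler(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for _, c := range checks {
                if err := c.fn(); err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                }
            }
            if failed {
                w.WriteHeader(http.StatusInternalServerError) // the probe reports "statuscode: 500"
                fmt.Fprint(w, body+"healthz check failed\n")
                return
            }
            fmt.Fprint(w, body+"healthz check passed\n")
        }
    }

    func main() {
        checks := []check{
            {"backend-http", func() error { return fmt.Errorf("not ready") }},
            {"has-synced", func() error { return fmt.Errorf("not ready") }},
            {"process-running", func() error { return nil }},
        }
        http.HandleFunc("/healthz", healthzHandler(checks))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
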
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.410315 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.410764 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:41.910742526 +0000 UTC m=+148.351386846 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.512188 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.512605 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.012592023 +0000 UTC m=+148.453236343 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.556516    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.616372    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.616842    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.116806911 +0000 UTC m=+148.557451231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.629738    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.718338    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.719579    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.219549848 +0000 UTC m=+148.660194168 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.752820    4869 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8lrvm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.752932    4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm" podUID="41fc0fbf-c0c9-431f-9f56-ef4296e6b25a" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.17:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.766675    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" event={"ID":"f1728b99-11ce-48cd-990b-d68af8a3f006","Type":"ContainerStarted","Data":"c45587f18ce9e86cda3da171ed0e9d75181ba3a2f45422d6771094bda52ae739"}
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.784475    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6w87k"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.795354    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-7jlsc"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.795683    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fb4q6"
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.820522    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.821712    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.321603411 +0000 UTC m=+148.762247731 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:41 crc kubenswrapper[4869]: I0929 13:43:41.927902    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:41 crc kubenswrapper[4869]: E0929 13:43:41.928777    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.428755782 +0000 UTC m=+148.869400102 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.034325    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.034541    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.534506704 +0000 UTC m=+148.975151024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.034767    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.034809    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.034889    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.035269    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.53525216 +0000 UTC m=+148.975896480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.042772    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.055848    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.136349    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.136743    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.636703702 +0000 UTC m=+149.077348032 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.137025    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.137067    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.137095    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.138215    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.638201435 +0000 UTC m=+149.078845765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.150644    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.164518    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.177373    4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 29 13:43:42 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld
Sep 29 13:43:42 crc kubenswrapper[4869]: [+]process-running ok
Sep 29 13:43:42 crc kubenswrapper[4869]: healthz check failed
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.177451    4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.238634    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.239186    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.73916448 +0000 UTC m=+149.179808800 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.271828    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8lrvm"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.272105    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.288526    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.340157    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.340551    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.840537679 +0000 UTC m=+149.281181999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.367167    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.443525    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.444427    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:42.944395895 +0000 UTC m=+149.385040215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.547727    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.583059    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.083037472 +0000 UTC m=+149.523681792 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.653392    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.653738    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.153716563 +0000 UTC m=+149.594360883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.696006    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xgs6h"]
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.707395    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.711135    4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.728204    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgs6h"]
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.755279    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.756181    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.256163649 +0000 UTC m=+149.696807979 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.822556    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" event={"ID":"f1728b99-11ce-48cd-990b-d68af8a3f006","Type":"ContainerStarted","Data":"135e4a7e85058b52e9b55cf2764320d6fecc3860fa5309bdcd9f812a0ef09be1"}
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.839594    4869 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.856745    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.857067    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5mwz\" (UniqueName: \"kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.857132    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.857153    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.857404    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.357381934 +0000 UTC m=+149.798026254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.871984    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"]
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.873151    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.886793    4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Sep 29 13:43:42 crc kubenswrapper[4869]: W0929 13:43:42.918881    4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-5a5d3294fffef577e67f5cf740fd3fe7bfef93330c34fa77fdb8d846dd376abb WatchSource:0}: Error finding container 5a5d3294fffef577e67f5cf740fd3fe7bfef93330c34fa77fdb8d846dd376abb: Status 404 returned error can't find the container with id 5a5d3294fffef577e67f5cf740fd3fe7bfef93330c34fa77fdb8d846dd376abb
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.958979    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.959436    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960201    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960291    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960320    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5mwz\" (UniqueName: \"kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960355    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxv6q\" (UniqueName: \"kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960404    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.960577    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.961804    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:42 crc kubenswrapper[4869]: E0929 13:43:42.962133    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.46211408 +0000 UTC m=+149.902758400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.966492    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"]
Sep 29 13:43:42 crc kubenswrapper[4869]: I0929 13:43:42.993453    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5mwz\" (UniqueName: \"kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz\") pod \"community-operators-xgs6h\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.052288    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.064998    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:43 crc kubenswrapper[4869]: E0929 13:43:43.065132    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.565103796 +0000 UTC m=+150.005748116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.065540    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.065583    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.065628    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxv6q\" (UniqueName: \"kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.065649    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.066590    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: E0929 13:43:43.069804    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.569758538 +0000 UTC m=+150.010402908 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.070438    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.074471    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pzt44"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.075533    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.087452    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pzt44"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.098439    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxv6q\" (UniqueName: \"kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q\") pod \"certified-operators-4xdz4\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.167571    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.168123    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxb8b\" (UniqueName: \"kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.168223    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.168263    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: E0929 13:43:43.168407    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.668386642 +0000 UTC m=+150.109030952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.175453    4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 29 13:43:43 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld
Sep 29 13:43:43 crc kubenswrapper[4869]: [+]process-running ok
Sep 29 13:43:43 crc kubenswrapper[4869]: healthz check failed
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.175493    4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.248428    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.282432    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxb8b\" (UniqueName: \"kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.282638    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.282747    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.282788    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.283832    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: E0929 13:43:43.283856    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.783833691 +0000 UTC m=+150.224478211 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cl5ps" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.284155    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.294434    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.295814    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.315328    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.317696    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxb8b\" (UniqueName: \"kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b\") pod \"community-operators-pzt44\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.390717    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.391649    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps9kr\" (UniqueName: \"kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.391711    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.391745    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: E0929 13:43:43.391825    4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-09-29 13:43:43.89178057 +0000 UTC m=+150.332424900 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.402127    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pzt44"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.418630    4869 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-09-29T13:43:42.839651086Z","Handler":null,"Name":""}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.437987    4869 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.438036    4869 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.493471    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps9kr\" (UniqueName: \"kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.493527    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.493557    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.493632    4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.494732    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.494957    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.531729    4869 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.531779    4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.560552    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps9kr\" (UniqueName: \"kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr\") pod \"certified-operators-r9gmm\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.636565    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgs6h"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.646351    4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cl5ps\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.655123    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:43:43 crc kubenswrapper[4869]: W0929 13:43:43.692801    4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24810bda_9398_4992_a422_e0196cd215d7.slice/crio-6a72bdfffba642fe7c930b65c1ebb9c88c4892b12cbdd30aa8d9f03d357b4ba0 WatchSource:0}: Error finding container 6a72bdfffba642fe7c930b65c1ebb9c88c4892b12cbdd30aa8d9f03d357b4ba0: Status 404 returned error can't find the container with id 6a72bdfffba642fe7c930b65c1ebb9c88c4892b12cbdd30aa8d9f03d357b4ba0
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.703093    4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.739326    4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.775032    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.866383    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerStarted","Data":"6a72bdfffba642fe7c930b65c1ebb9c88c4892b12cbdd30aa8d9f03d357b4ba0"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.879261    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"be3ef467cb65552fe27461d958b591a050302feea6bce6c8964753f94214c5d2"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.879323    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"d98c06a4d6a0c2da23f2b7703d8a1290b24678e3a93961b84af86a221413aaad"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.893987    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" event={"ID":"f1728b99-11ce-48cd-990b-d68af8a3f006","Type":"ContainerStarted","Data":"8aa976a9a80b5646539b271ad30168e62d43c080f5689d45102cc9a0756ea5bb"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.899759    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"88594168a4d7ebac97a80ae0d7f23731c8abe9be505bb3cbd22ffad80e856e82"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.899806    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5a5d3294fffef577e67f5cf740fd3fe7bfef93330c34fa77fdb8d846dd376abb"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.900391    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.903193    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pzt44"]
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.910971    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6c43dd8678db89f4de64be40869d5d92a6f32f04e4f626a7187ee6a2aa005bc7"}
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.911039    4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"87f79f09d2a0ec445e3a31bb9304489aaf9f419aac9d7d82ad13352f8d79fd8b"}
Sep 29 13:43:43 crc kubenswrapper[4869]: W0929 13:43:43.911678    4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75480777_5b14_40ac_91cb_621a989462d0.slice/crio-4438a1d18e062cb54a4e70adab1273b6bb054cb4f058f7622919b91ee5977f2c WatchSource:0}: Error finding container 4438a1d18e062cb54a4e70adab1273b6bb054cb4f058f7622919b91ee5977f2c: Status 404 returned error can't find the container with id 4438a1d18e062cb54a4e70adab1273b6bb054cb4f058f7622919b91ee5977f2c
Sep 29 13:43:43 crc kubenswrapper[4869]: I0929 13:43:43.934680    4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-2gjfj" podStartSLOduration=11.934645051 podStartE2EDuration="11.934645051s" podCreationTimestamp="2025-09-29 13:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:43.922457217 +0000 UTC m=+150.363101537" watchObservedRunningTime="2025-09-29 13:43:43.934645051 +0000 UTC m=+150.375289371"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.004273    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"]
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.014171    4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.021555    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-s2kxs"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.146092    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"]
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.182658    4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Sep 29 13:43:44 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld
Sep 29 13:43:44 crc kubenswrapper[4869]: [+]process-running ok
Sep 29 13:43:44 crc kubenswrapper[4869]: healthz check failed
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.182745    4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.208758    4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.209851    4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.222225    4869 patch_prober.go:28] interesting pod/console-f9d7485db-m2zpm container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.222301    4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m2zpm" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerName="console" probeResult="failure" output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.268741    4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.410542    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"]
Sep 29 13:43:44 crc kubenswrapper[4869]: W0929 13:43:44.502650    4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda39dcbb0_84e5_458c_9a0a_6d3388f423df.slice/crio-e11ea99a75745a39a923c099f96585d31380e451f13c59fd707d5494e7f5e992 WatchSource:0}: Error finding container e11ea99a75745a39a923c099f96585d31380e451f13c59fd707d5494e7f5e992: Status 404 returned error can't find the container with id e11ea99a75745a39a923c099f96585d31380e451f13c59fd707d5494e7f5e992
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.680368    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"]
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.682512    4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tnwxz"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.685734    4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.697683    4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"]
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.844958    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rzhk\" (UniqueName: \"kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.845033    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.845074    4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz"
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.910812    4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.911799    4869 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.922602 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.925538 4869 generic.go:334] "Generic (PLEG): container finished" podID="75480777-5b14-40ac-91cb-621a989462d0" containerID="64a53bdee48065ef91aecd4386bb41a18869b6a135e7c13c1bdbe589974c0e64" exitCode=0 Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.925637 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerDied","Data":"64a53bdee48065ef91aecd4386bb41a18869b6a135e7c13c1bdbe589974c0e64"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.925675 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerStarted","Data":"4438a1d18e062cb54a4e70adab1273b6bb054cb4f058f7622919b91ee5977f2c"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.927888 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.927952 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.928201 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.930846 4869 generic.go:334] "Generic (PLEG): container finished" podID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerID="25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526" exitCode=0 Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.930937 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerDied","Data":"25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.930973 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerStarted","Data":"5ef4fba5d8585b75535a652e785acf529f2e5036ba2b8f8822343f1363017d1a"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.944078 4869 generic.go:334] "Generic (PLEG): container finished" podID="d831b6b1-9e16-4bd3-88f7-7bed5f73206f" containerID="9479c97b4434f620057414013f2760510efbf0a2e43770f38169961cf760f17d" exitCode=0 Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.944230 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" event={"ID":"d831b6b1-9e16-4bd3-88f7-7bed5f73206f","Type":"ContainerDied","Data":"9479c97b4434f620057414013f2760510efbf0a2e43770f38169961cf760f17d"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.945982 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rzhk\" (UniqueName: \"kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk\") pod \"redhat-marketplace-tnwxz\" (UID: 
\"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.946082 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.946136 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.946784 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.947449 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.957241 4869 generic.go:334] "Generic (PLEG): container finished" podID="24810bda-9398-4992-a422-e0196cd215d7" containerID="34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08" exitCode=0 Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.957377 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerDied","Data":"34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.965120 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" event={"ID":"a39dcbb0-84e5-458c-9a0a-6d3388f423df","Type":"ContainerStarted","Data":"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.965199 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" event={"ID":"a39dcbb0-84e5-458c-9a0a-6d3388f423df","Type":"ContainerStarted","Data":"e11ea99a75745a39a923c099f96585d31380e451f13c59fd707d5494e7f5e992"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.966682 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.976999 4869 generic.go:334] "Generic (PLEG): container finished" podID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerID="b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601" exitCode=0 Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.977130 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" 
event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerDied","Data":"b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.977243 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerStarted","Data":"2f43ff58e44364991d2347d72565b06a4ce963fbd038aa91f58b0a5e5905dbd9"} Sep 29 13:43:44 crc kubenswrapper[4869]: I0929 13:43:44.980860 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rzhk\" (UniqueName: \"kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk\") pod \"redhat-marketplace-tnwxz\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.004036 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.030392 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.030474 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.031002 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.031035 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.042306 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.042379 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.049016 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.049171 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir\") pod 
\"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.054079 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.080040 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" podStartSLOduration=127.08001776 podStartE2EDuration="2m7.08001776s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:45.07110931 +0000 UTC m=+151.511753630" watchObservedRunningTime="2025-09-29 13:43:45.08001776 +0000 UTC m=+151.520662080" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.081013 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.082760 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.098336 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.150881 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.151795 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.170417 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.174548 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.187761 4869 patch_prober.go:28] interesting pod/router-default-5444994796-2ghwn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Sep 29 13:43:45 crc kubenswrapper[4869]: [-]has-synced failed: reason withheld Sep 29 13:43:45 crc kubenswrapper[4869]: [+]process-running ok Sep 29 13:43:45 crc kubenswrapper[4869]: healthz check failed Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.187879 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2ghwn" 
podUID="e47976d3-2e57-4943-a744-75ef0accd1ec" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.208427 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.228114 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.253316 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.253473 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x9p9\" (UniqueName: \"kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.253526 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.354641 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.354709 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x9p9\" (UniqueName: \"kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.354729 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.355225 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc 
kubenswrapper[4869]: I0929 13:43:45.356564 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.357734 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"] Sep 29 13:43:45 crc kubenswrapper[4869]: W0929 13:43:45.363472 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode983aa6b_2924_498a_a957_6dce64d318a0.slice/crio-950ffd415b7a293d8cd02118c8ba01fd362033ac1f77800087ae8e14684a0180 WatchSource:0}: Error finding container 950ffd415b7a293d8cd02118c8ba01fd362033ac1f77800087ae8e14684a0180: Status 404 returned error can't find the container with id 950ffd415b7a293d8cd02118c8ba01fd362033ac1f77800087ae8e14684a0180 Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.379677 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x9p9\" (UniqueName: \"kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9\") pod \"redhat-marketplace-p56nh\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.405548 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.683494 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.789056 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:43:45 crc kubenswrapper[4869]: W0929 13:43:45.811926 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode05d5f70_7b18_422e_8862_a36aa7eb47ad.slice/crio-15e68da78f5390b86f95964f1b05ade20a8f1d27ca4fb9b45f92c2cde5335a91 WatchSource:0}: Error finding container 15e68da78f5390b86f95964f1b05ade20a8f1d27ca4fb9b45f92c2cde5335a91: Status 404 returned error can't find the container with id 15e68da78f5390b86f95964f1b05ade20a8f1d27ca4fb9b45f92c2cde5335a91 Sep 29 13:43:45 crc kubenswrapper[4869]: I0929 13:43:45.855510 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.011549 4869 generic.go:334] "Generic (PLEG): container finished" podID="e983aa6b-2924-498a-a957-6dce64d318a0" containerID="4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321" exitCode=0 Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.011645 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerDied","Data":"4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321"} Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.011679 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" 
event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerStarted","Data":"950ffd415b7a293d8cd02118c8ba01fd362033ac1f77800087ae8e14684a0180"} Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.020103 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"36518473-1273-490e-97d4-d24d7ccdb636","Type":"ContainerStarted","Data":"53219c7f1d7d574f61bff4fb4ebfbcdce55c20fafcb7b9e0bb97735f947f355a"} Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.026448 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerStarted","Data":"15e68da78f5390b86f95964f1b05ade20a8f1d27ca4fb9b45f92c2cde5335a91"} Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.032565 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-kkrrb" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.117495 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.118996 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.124202 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.128369 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.173913 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.176125 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.176271 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr6k5\" (UniqueName: \"kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.176343 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.184061 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-2ghwn" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.280433 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.280540 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr6k5\" (UniqueName: \"kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.280593 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.282272 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.282442 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.313486 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr6k5\" (UniqueName: \"kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5\") pod \"redhat-operators-np8tv\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.460120 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.475315 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"] Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.476872 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.491432 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"] Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.553526 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590187 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume\") pod \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590378 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume\") pod \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590483 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9npd\" (UniqueName: \"kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd\") pod \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\" (UID: \"d831b6b1-9e16-4bd3-88f7-7bed5f73206f\") " Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590738 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmkh8\" (UniqueName: \"kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590884 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.590925 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.593957 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume" (OuterVolumeSpecName: "config-volume") pod "d831b6b1-9e16-4bd3-88f7-7bed5f73206f" (UID: "d831b6b1-9e16-4bd3-88f7-7bed5f73206f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.601910 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd" (OuterVolumeSpecName: "kube-api-access-c9npd") pod "d831b6b1-9e16-4bd3-88f7-7bed5f73206f" (UID: "d831b6b1-9e16-4bd3-88f7-7bed5f73206f"). InnerVolumeSpecName "kube-api-access-c9npd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.604282 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d831b6b1-9e16-4bd3-88f7-7bed5f73206f" (UID: "d831b6b1-9e16-4bd3-88f7-7bed5f73206f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694228 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694282 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694304 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmkh8\" (UniqueName: \"kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694416 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9npd\" (UniqueName: \"kubernetes.io/projected/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-kube-api-access-c9npd\") on node \"crc\" DevicePath \"\"" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694434 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.694449 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d831b6b1-9e16-4bd3-88f7-7bed5f73206f-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.696205 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.696442 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities\") pod \"redhat-operators-4f8ds\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.734335 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmkh8\" (UniqueName: \"kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8\") pod \"redhat-operators-4f8ds\" (UID: 
\"7bc1173a-d614-4277-968b-4139f82c942d\") " pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.805150 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:43:46 crc kubenswrapper[4869]: I0929 13:43:46.897707 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:43:46 crc kubenswrapper[4869]: W0929 13:43:46.931872 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod209e6085_24df_4c1f_ba21_016aee31035b.slice/crio-4d85a0ccefa09e8373a342b19ac86996189faa06cf9f8e0225267a62afef1c24 WatchSource:0}: Error finding container 4d85a0ccefa09e8373a342b19ac86996189faa06cf9f8e0225267a62afef1c24: Status 404 returned error can't find the container with id 4d85a0ccefa09e8373a342b19ac86996189faa06cf9f8e0225267a62afef1c24 Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.048000 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" event={"ID":"d831b6b1-9e16-4bd3-88f7-7bed5f73206f","Type":"ContainerDied","Data":"755446dda5837573fe162e8ac038b9c4ef81e4f75df79d179c77053c461f2243"} Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.048057 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="755446dda5837573fe162e8ac038b9c4ef81e4f75df79d179c77053c461f2243" Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.048077 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj" Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.059194 4869 generic.go:334] "Generic (PLEG): container finished" podID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerID="0241e307a1e9f7f89e9d66a5336b657f79ac3d8f29be6f9b06322db705cf425f" exitCode=0 Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.059288 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerDied","Data":"0241e307a1e9f7f89e9d66a5336b657f79ac3d8f29be6f9b06322db705cf425f"} Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.068216 4869 generic.go:334] "Generic (PLEG): container finished" podID="36518473-1273-490e-97d4-d24d7ccdb636" containerID="e9bf1a53dfd7678a246e7736cf96ac4224c8fb49f47df4b5f71128098841f275" exitCode=0 Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.068292 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"36518473-1273-490e-97d4-d24d7ccdb636","Type":"ContainerDied","Data":"e9bf1a53dfd7678a246e7736cf96ac4224c8fb49f47df4b5f71128098841f275"} Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.071049 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerStarted","Data":"4d85a0ccefa09e8373a342b19ac86996189faa06cf9f8e0225267a62afef1c24"} Sep 29 13:43:47 crc kubenswrapper[4869]: I0929 13:43:47.256514 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"] Sep 29 13:43:47 crc kubenswrapper[4869]: W0929 13:43:47.300709 4869 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bc1173a_d614_4277_968b_4139f82c942d.slice/crio-49e640c9dd228cbbb8c2d392f7592cfda03bf9419d15e35c500439eeee52d4c2 WatchSource:0}: Error finding container 49e640c9dd228cbbb8c2d392f7592cfda03bf9419d15e35c500439eeee52d4c2: Status 404 returned error can't find the container with id 49e640c9dd228cbbb8c2d392f7592cfda03bf9419d15e35c500439eeee52d4c2 Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.115772 4869 generic.go:334] "Generic (PLEG): container finished" podID="7bc1173a-d614-4277-968b-4139f82c942d" containerID="b1d922c82f75f0c7ae56f8b549e242f7de2525a453d26754dca95c669c0ad1a6" exitCode=0 Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.115911 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerDied","Data":"b1d922c82f75f0c7ae56f8b549e242f7de2525a453d26754dca95c669c0ad1a6"} Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.115950 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerStarted","Data":"49e640c9dd228cbbb8c2d392f7592cfda03bf9419d15e35c500439eeee52d4c2"} Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.158716 4869 generic.go:334] "Generic (PLEG): container finished" podID="209e6085-24df-4c1f-ba21-016aee31035b" containerID="20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d" exitCode=0 Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.158909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerDied","Data":"20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d"} Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.512061 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.649205 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access\") pod \"36518473-1273-490e-97d4-d24d7ccdb636\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.650088 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir\") pod \"36518473-1273-490e-97d4-d24d7ccdb636\" (UID: \"36518473-1273-490e-97d4-d24d7ccdb636\") " Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.650157 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "36518473-1273-490e-97d4-d24d7ccdb636" (UID: "36518473-1273-490e-97d4-d24d7ccdb636"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.652834 4869 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/36518473-1273-490e-97d4-d24d7ccdb636-kubelet-dir\") on node \"crc\" DevicePath \"\"" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.663885 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "36518473-1273-490e-97d4-d24d7ccdb636" (UID: "36518473-1273-490e-97d4-d24d7ccdb636"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.754103 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36518473-1273-490e-97d4-d24d7ccdb636-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.985571 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Sep 29 13:43:48 crc kubenswrapper[4869]: E0929 13:43:48.985896 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36518473-1273-490e-97d4-d24d7ccdb636" containerName="pruner" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.985910 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="36518473-1273-490e-97d4-d24d7ccdb636" containerName="pruner" Sep 29 13:43:48 crc kubenswrapper[4869]: E0929 13:43:48.985922 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d831b6b1-9e16-4bd3-88f7-7bed5f73206f" containerName="collect-profiles" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.985931 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d831b6b1-9e16-4bd3-88f7-7bed5f73206f" containerName="collect-profiles" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.986091 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d831b6b1-9e16-4bd3-88f7-7bed5f73206f" containerName="collect-profiles" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.986109 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="36518473-1273-490e-97d4-d24d7ccdb636" containerName="pruner" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.986657 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.988750 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Sep 29 13:43:48 crc kubenswrapper[4869]: I0929 13:43:48.989094 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.002362 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.062811 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.062891 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.164932 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.164988 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.165074 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.175372 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"36518473-1273-490e-97d4-d24d7ccdb636","Type":"ContainerDied","Data":"53219c7f1d7d574f61bff4fb4ebfbcdce55c20fafcb7b9e0bb97735f947f355a"} Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.175421 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53219c7f1d7d574f61bff4fb4ebfbcdce55c20fafcb7b9e0bb97735f947f355a" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.175508 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.189587 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.359535 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.787102 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Sep 29 13:43:49 crc kubenswrapper[4869]: I0929 13:43:49.825098 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-zk45b" Sep 29 13:43:50 crc kubenswrapper[4869]: I0929 13:43:50.212535 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ef675a06-3b4a-4496-919a-cb6cc1b5d49d","Type":"ContainerStarted","Data":"cfbadca2545090ba1ac060bbd330cd303143a61d37652063047dc1ddc2306709"} Sep 29 13:43:50 crc kubenswrapper[4869]: I0929 13:43:50.658169 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:43:50 crc kubenswrapper[4869]: I0929 13:43:50.658256 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:43:51 crc kubenswrapper[4869]: I0929 13:43:51.234592 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ef675a06-3b4a-4496-919a-cb6cc1b5d49d","Type":"ContainerStarted","Data":"9d56480d99a1af981bb3a320dd0b60d6de51eb10e11b1af914dce047f91419e2"} Sep 29 13:43:51 crc kubenswrapper[4869]: I0929 13:43:51.263717 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.263659756 podStartE2EDuration="3.263659756s" podCreationTimestamp="2025-09-29 13:43:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:43:51.257952857 +0000 UTC m=+157.698597177" watchObservedRunningTime="2025-09-29 13:43:51.263659756 +0000 UTC m=+157.704304096" Sep 29 13:43:52 crc kubenswrapper[4869]: I0929 13:43:52.254745 4869 generic.go:334] "Generic (PLEG): container finished" podID="ef675a06-3b4a-4496-919a-cb6cc1b5d49d" containerID="9d56480d99a1af981bb3a320dd0b60d6de51eb10e11b1af914dce047f91419e2" exitCode=0 Sep 29 13:43:52 crc kubenswrapper[4869]: I0929 13:43:52.279996 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ef675a06-3b4a-4496-919a-cb6cc1b5d49d","Type":"ContainerDied","Data":"9d56480d99a1af981bb3a320dd0b60d6de51eb10e11b1af914dce047f91419e2"} 
Sep 29 13:43:52 crc kubenswrapper[4869]: I0929 13:43:52.872198 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj"
Sep 29 13:43:54 crc kubenswrapper[4869]: I0929 13:43:54.276154 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:43:54 crc kubenswrapper[4869]: I0929 13:43:54.282231 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:43:55 crc kubenswrapper[4869]: I0929 13:43:55.027960 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Sep 29 13:43:55 crc kubenswrapper[4869]: I0929 13:43:55.028029 4869 patch_prober.go:28] interesting pod/downloads-7954f5f757-b8gtg container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Sep 29 13:43:55 crc kubenswrapper[4869]: I0929 13:43:55.028039 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Sep 29 13:43:55 crc kubenswrapper[4869]: I0929 13:43:55.028114 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b8gtg" podUID="41996eeb-6d4e-45e1-b140-f2ff7f0bec29" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Sep 29 13:44:00 crc kubenswrapper[4869]: I0929 13:44:00.377897 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:44:00 crc kubenswrapper[4869]: I0929 13:44:00.385158 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d791a01-f367-41f9-bd94-a7cee0b4b7c7-metrics-certs\") pod \"network-metrics-daemon-mxqkf\" (UID: \"9d791a01-f367-41f9-bd94-a7cee0b4b7c7\") " pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:44:00 crc kubenswrapper[4869]: I0929 13:44:00.656871 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mxqkf"
Sep 29 13:44:03 crc kubenswrapper[4869]: I0929 13:44:03.788143 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps"
Sep 29 13:44:05 crc kubenswrapper[4869]: I0929 13:44:05.036323 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-b8gtg"
Sep 29 13:44:14 crc kubenswrapper[4869]: I0929 13:44:14.877930 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7qjv7"
Sep 29 13:44:15 crc kubenswrapper[4869]: I0929 13:44:15.905653 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Sep 29 13:44:15 crc kubenswrapper[4869]: I0929 13:44:15.924947 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access\") pod \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") "
Sep 29 13:44:15 crc kubenswrapper[4869]: I0929 13:44:15.925405 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir\") pod \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\" (UID: \"ef675a06-3b4a-4496-919a-cb6cc1b5d49d\") "
Sep 29 13:44:15 crc kubenswrapper[4869]: I0929 13:44:15.925748 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ef675a06-3b4a-4496-919a-cb6cc1b5d49d" (UID: "ef675a06-3b4a-4496-919a-cb6cc1b5d49d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 29 13:44:15 crc kubenswrapper[4869]: I0929 13:44:15.935137 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ef675a06-3b4a-4496-919a-cb6cc1b5d49d" (UID: "ef675a06-3b4a-4496-919a-cb6cc1b5d49d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:44:16 crc kubenswrapper[4869]: I0929 13:44:16.026282 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kube-api-access\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:16 crc kubenswrapper[4869]: I0929 13:44:16.026322 4869 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ef675a06-3b4a-4496-919a-cb6cc1b5d49d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:16 crc kubenswrapper[4869]: I0929 13:44:16.469822 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ef675a06-3b4a-4496-919a-cb6cc1b5d49d","Type":"ContainerDied","Data":"cfbadca2545090ba1ac060bbd330cd303143a61d37652063047dc1ddc2306709"} Sep 29 13:44:16 crc kubenswrapper[4869]: I0929 13:44:16.469886 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfbadca2545090ba1ac060bbd330cd303143a61d37652063047dc1ddc2306709" Sep 29 13:44:16 crc kubenswrapper[4869]: I0929 13:44:16.469925 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Sep 29 13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.931294 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Sep 29 13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.932480 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sxb8b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-pzt44_openshift-marketplace(75480777-5b14-40ac-91cb-621a989462d0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 
13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.933749 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-pzt44" podUID="75480777-5b14-40ac-91cb-621a989462d0" Sep 29 13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.978059 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Sep 29 13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.978257 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l5mwz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-xgs6h_openshift-marketplace(24810bda-9398-4992-a422-e0196cd215d7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 13:44:19 crc kubenswrapper[4869]: E0929 13:44:19.979453 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-xgs6h" podUID="24810bda-9398-4992-a422-e0196cd215d7" Sep 29 13:44:20 crc kubenswrapper[4869]: I0929 13:44:20.657183 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:44:20 crc kubenswrapper[4869]: I0929 13:44:20.657292 4869 prober.go:107] "Probe failed" probeType="Liveness" 
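The patch_prober/prober pairs above are HTTP probes failing with "connection refused". A self-contained sketch of an HTTP GET prober in the same spirit; the URL and timeout below are illustrative, and treating 2xx/3xx as success is an assumption of this sketch (though it matches how kubelet's httpGet probes behave):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// probeHTTP performs one HTTP probe: any transport error (such as
// connection refused) or a non-2xx/3xx status counts as failure.
func probeHTTP(url string, timeout time.Duration) (ok bool, output string) {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return false, err.Error() // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return true, string(body)
	}
	return false, fmt.Sprintf("status %d: %s", resp.StatusCode, body)
}

func main() {
	ok, out := probeHTTP("http://127.0.0.1:8798/health", time.Second)
	fmt.Printf("probeResult=%v output=%q\n", ok, out)
}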
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:44:22 crc kubenswrapper[4869]: I0929 13:44:22.293617 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.045313 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-xgs6h" podUID="24810bda-9398-4992-a422-e0196cd215d7" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.045326 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-pzt44" podUID="75480777-5b14-40ac-91cb-621a989462d0" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.127202 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.127410 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lmkh8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-4f8ds_openshift-marketplace(7bc1173a-d614-4277-968b-4139f82c942d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.128696 4869 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-4f8ds" podUID="7bc1173a-d614-4277-968b-4139f82c942d" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.689769 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-4f8ds" podUID="7bc1173a-d614-4277-968b-4139f82c942d" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.752346 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.752531 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4x9p9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-p56nh_openshift-marketplace(e05d5f70-7b18-422e-8862-a36aa7eb47ad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.753861 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-p56nh" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.771730 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying 
config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.771860 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8rzhk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-tnwxz_openshift-marketplace(e983aa6b-2924-498a-a957-6dce64d318a0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 13:44:23 crc kubenswrapper[4869]: E0929 13:44:23.773248 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-tnwxz" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.114972 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-p56nh" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.115053 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-tnwxz" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.179743 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.180316 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ps9kr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-r9gmm_openshift-marketplace(3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.181944 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-r9gmm" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.250197 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.250412 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hxv6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-4xdz4_openshift-marketplace(a3c2054b-c189-4824-8fbf-e85af17c7ad1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.251864 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-4xdz4" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.268922 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.269130 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cr6k5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-np8tv_openshift-marketplace(209e6085-24df-4c1f-ba21-016aee31035b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.270369 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-np8tv" podUID="209e6085-24df-4c1f-ba21-016aee31035b"
Sep 29 13:44:25 crc kubenswrapper[4869]: I0929 13:44:25.354262 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mxqkf"]
Sep 29 13:44:25 crc kubenswrapper[4869]: I0929 13:44:25.537518 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" event={"ID":"9d791a01-f367-41f9-bd94-a7cee0b4b7c7","Type":"ContainerStarted","Data":"ec4e009530458808d34bd7bb4ba1ded8e18f88126aec0bdf64b2fb950649ed09"}
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.541199 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-np8tv" podUID="209e6085-24df-4c1f-ba21-016aee31035b"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.541265 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-4xdz4" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1"
Sep 29 13:44:25 crc kubenswrapper[4869]: E0929 13:44:25.542054 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-r9gmm" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e"
pod="openshift-marketplace/certified-operators-r9gmm" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" Sep 29 13:44:26 crc kubenswrapper[4869]: I0929 13:44:26.544878 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" event={"ID":"9d791a01-f367-41f9-bd94-a7cee0b4b7c7","Type":"ContainerStarted","Data":"bc7113a1b8219dd315fc7a12be5946bbd37fa52236698cff6d19e073fadd0a05"} Sep 29 13:44:26 crc kubenswrapper[4869]: I0929 13:44:26.545310 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mxqkf" event={"ID":"9d791a01-f367-41f9-bd94-a7cee0b4b7c7","Type":"ContainerStarted","Data":"9df16fbfb83f3281a15ab48ac2ae1cfd4b61c1271cff415ab6dd893deedd6ffb"} Sep 29 13:44:26 crc kubenswrapper[4869]: I0929 13:44:26.580231 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-mxqkf" podStartSLOduration=168.580206554 podStartE2EDuration="2m48.580206554s" podCreationTimestamp="2025-09-29 13:41:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:44:26.576446126 +0000 UTC m=+193.017090446" watchObservedRunningTime="2025-09-29 13:44:26.580206554 +0000 UTC m=+193.020850874" Sep 29 13:44:36 crc kubenswrapper[4869]: I0929 13:44:36.620032 4869 generic.go:334] "Generic (PLEG): container finished" podID="75480777-5b14-40ac-91cb-621a989462d0" containerID="0dad7404d79e53e1bf9b89b1bcd2c1fe954ef946297f2638cbfc7daed1bef9f1" exitCode=0 Sep 29 13:44:36 crc kubenswrapper[4869]: I0929 13:44:36.621106 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerDied","Data":"0dad7404d79e53e1bf9b89b1bcd2c1fe954ef946297f2638cbfc7daed1bef9f1"} Sep 29 13:44:37 crc kubenswrapper[4869]: I0929 13:44:37.631683 4869 generic.go:334] "Generic (PLEG): container finished" podID="e983aa6b-2924-498a-a957-6dce64d318a0" containerID="f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652" exitCode=0 Sep 29 13:44:37 crc kubenswrapper[4869]: I0929 13:44:37.631769 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerDied","Data":"f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652"} Sep 29 13:44:37 crc kubenswrapper[4869]: I0929 13:44:37.640895 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerStarted","Data":"421b5fc1f0d419f15d6a1d472f34ba72ad798d4c3cb8c517a96d17950f6decfa"} Sep 29 13:44:37 crc kubenswrapper[4869]: I0929 13:44:37.684837 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pzt44" podStartSLOduration=2.268447892 podStartE2EDuration="54.684810852s" podCreationTimestamp="2025-09-29 13:43:43 +0000 UTC" firstStartedPulling="2025-09-29 13:43:44.927555811 +0000 UTC m=+151.368200131" lastFinishedPulling="2025-09-29 13:44:37.343918771 +0000 UTC m=+203.784563091" observedRunningTime="2025-09-29 13:44:37.68248685 +0000 UTC m=+204.123131170" watchObservedRunningTime="2025-09-29 13:44:37.684810852 +0000 UTC m=+204.125455172" Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.650937 4869 generic.go:334] "Generic (PLEG): container finished" 
podID="24810bda-9398-4992-a422-e0196cd215d7" containerID="c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8" exitCode=0 Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.650984 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerDied","Data":"c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8"} Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.656385 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerStarted","Data":"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f"} Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.662524 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerStarted","Data":"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5"} Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.665115 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerStarted","Data":"df9fd86690253d41ec224c3f50c8520773f28f0139c33aa5d5134c19ee41829e"} Sep 29 13:44:38 crc kubenswrapper[4869]: I0929 13:44:38.697145 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tnwxz" podStartSLOduration=2.440199182 podStartE2EDuration="54.697118738s" podCreationTimestamp="2025-09-29 13:43:44 +0000 UTC" firstStartedPulling="2025-09-29 13:43:46.014244637 +0000 UTC m=+152.454888957" lastFinishedPulling="2025-09-29 13:44:38.271164183 +0000 UTC m=+204.711808513" observedRunningTime="2025-09-29 13:44:38.692184276 +0000 UTC m=+205.132828596" watchObservedRunningTime="2025-09-29 13:44:38.697118738 +0000 UTC m=+205.137763068" Sep 29 13:44:39 crc kubenswrapper[4869]: I0929 13:44:39.675256 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerStarted","Data":"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71"} Sep 29 13:44:39 crc kubenswrapper[4869]: I0929 13:44:39.678136 4869 generic.go:334] "Generic (PLEG): container finished" podID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerID="e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5" exitCode=0 Sep 29 13:44:39 crc kubenswrapper[4869]: I0929 13:44:39.678201 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerDied","Data":"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5"} Sep 29 13:44:39 crc kubenswrapper[4869]: I0929 13:44:39.680536 4869 generic.go:334] "Generic (PLEG): container finished" podID="7bc1173a-d614-4277-968b-4139f82c942d" containerID="df9fd86690253d41ec224c3f50c8520773f28f0139c33aa5d5134c19ee41829e" exitCode=0 Sep 29 13:44:39 crc kubenswrapper[4869]: I0929 13:44:39.680598 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerDied","Data":"df9fd86690253d41ec224c3f50c8520773f28f0139c33aa5d5134c19ee41829e"} Sep 29 13:44:39 crc 
kubenswrapper[4869]: I0929 13:44:39.702188 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xgs6h" podStartSLOduration=3.395532992 podStartE2EDuration="57.702166269s" podCreationTimestamp="2025-09-29 13:43:42 +0000 UTC" firstStartedPulling="2025-09-29 13:43:44.966241148 +0000 UTC m=+151.406885468" lastFinishedPulling="2025-09-29 13:44:39.272874425 +0000 UTC m=+205.713518745" observedRunningTime="2025-09-29 13:44:39.698029758 +0000 UTC m=+206.138674098" watchObservedRunningTime="2025-09-29 13:44:39.702166269 +0000 UTC m=+206.142810609" Sep 29 13:44:40 crc kubenswrapper[4869]: I0929 13:44:40.690199 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerStarted","Data":"20385cc3b75e87573e339dda52114daeecd964edaf360daf61e09c0131e1c848"} Sep 29 13:44:40 crc kubenswrapper[4869]: I0929 13:44:40.694715 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerStarted","Data":"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9"} Sep 29 13:44:40 crc kubenswrapper[4869]: I0929 13:44:40.709111 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4f8ds" podStartSLOduration=2.393994162 podStartE2EDuration="54.709083769s" podCreationTimestamp="2025-09-29 13:43:46 +0000 UTC" firstStartedPulling="2025-09-29 13:43:48.141874984 +0000 UTC m=+154.582519304" lastFinishedPulling="2025-09-29 13:44:40.456964591 +0000 UTC m=+206.897608911" observedRunningTime="2025-09-29 13:44:40.708563836 +0000 UTC m=+207.149208156" watchObservedRunningTime="2025-09-29 13:44:40.709083769 +0000 UTC m=+207.149728099" Sep 29 13:44:40 crc kubenswrapper[4869]: I0929 13:44:40.733207 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r9gmm" podStartSLOduration=2.285278466 podStartE2EDuration="57.733179816s" podCreationTimestamp="2025-09-29 13:43:43 +0000 UTC" firstStartedPulling="2025-09-29 13:43:45.003080581 +0000 UTC m=+151.443724901" lastFinishedPulling="2025-09-29 13:44:40.450981931 +0000 UTC m=+206.891626251" observedRunningTime="2025-09-29 13:44:40.731692256 +0000 UTC m=+207.172336566" watchObservedRunningTime="2025-09-29 13:44:40.733179816 +0000 UTC m=+207.173824136" Sep 29 13:44:41 crc kubenswrapper[4869]: I0929 13:44:41.701462 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerStarted","Data":"faac586ab85917782c45ebab1bc0f07bfeb35b270bed72d5aca5f4993c4d8979"} Sep 29 13:44:41 crc kubenswrapper[4869]: I0929 13:44:41.702967 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerStarted","Data":"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a"} Sep 29 13:44:41 crc kubenswrapper[4869]: I0929 13:44:41.705804 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerStarted","Data":"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629"} Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.720958 4869 
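The pairs of lines above, "Generic (PLEG): container finished" followed by a SyncLoop ContainerDied event, come from the pod lifecycle event generator diffing successive snapshots of runtime state. A minimal sketch of such a relist diff; the types and event names here are illustrative, not kubelet's:

package main

import "fmt"

type containerState string

const (
	running containerState = "running"
	exited  containerState = "exited"
)

type event struct{ id, kind string }

// relist compares two snapshots of container states and emits lifecycle
// events, roughly what PLEG does between polls of the runtime.
func relist(old, cur map[string]containerState) []event {
	var events []event
	for id, s := range cur {
		switch prev := old[id]; {
		case prev == "" && s == running:
			events = append(events, event{id, "ContainerStarted"})
		case prev == running && s == exited:
			events = append(events, event{id, "ContainerDied"})
		}
	}
	return events
}

func main() {
	old := map[string]containerState{"e6d87ffc": running}
	cur := map[string]containerState{"e6d87ffc": exited, "514c8d32": running}
	for _, e := range relist(old, cur) {
		fmt.Printf("SyncLoop (PLEG): %s %s\n", e.kind, e.id)
	}
}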
generic.go:334] "Generic (PLEG): container finished" podID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerID="faac586ab85917782c45ebab1bc0f07bfeb35b270bed72d5aca5f4993c4d8979" exitCode=0 Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.721066 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerDied","Data":"faac586ab85917782c45ebab1bc0f07bfeb35b270bed72d5aca5f4993c4d8979"} Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.727133 4869 generic.go:334] "Generic (PLEG): container finished" podID="209e6085-24df-4c1f-ba21-016aee31035b" containerID="e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a" exitCode=0 Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.727203 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerDied","Data":"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a"} Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.732213 4869 generic.go:334] "Generic (PLEG): container finished" podID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerID="e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629" exitCode=0 Sep 29 13:44:42 crc kubenswrapper[4869]: I0929 13:44:42.732249 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerDied","Data":"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629"} Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.053093 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xgs6h" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.053176 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xgs6h" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.403286 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.403822 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.423102 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xgs6h" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.457631 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.656323 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r9gmm" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.656438 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r9gmm" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.698150 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r9gmm" Sep 29 13:44:43 crc kubenswrapper[4869]: I0929 13:44:43.778563 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.004577 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.005204 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.062921 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.676597 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pzt44"] Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.748867 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pzt44" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="registry-server" containerID="cri-o://421b5fc1f0d419f15d6a1d472f34ba72ad798d4c3cb8c517a96d17950f6decfa" gracePeriod=2 Sep 29 13:44:45 crc kubenswrapper[4869]: I0929 13:44:45.794241 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.754986 4869 generic.go:334] "Generic (PLEG): container finished" podID="75480777-5b14-40ac-91cb-621a989462d0" containerID="421b5fc1f0d419f15d6a1d472f34ba72ad798d4c3cb8c517a96d17950f6decfa" exitCode=0 Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.755071 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerDied","Data":"421b5fc1f0d419f15d6a1d472f34ba72ad798d4c3cb8c517a96d17950f6decfa"} Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.758189 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerStarted","Data":"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6"} Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.805436 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.805514 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.861805 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:44:46 crc kubenswrapper[4869]: I0929 13:44:46.862998 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.012668 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxb8b\" (UniqueName: \"kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b\") pod \"75480777-5b14-40ac-91cb-621a989462d0\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.012797 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content\") pod \"75480777-5b14-40ac-91cb-621a989462d0\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.012864 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities\") pod \"75480777-5b14-40ac-91cb-621a989462d0\" (UID: \"75480777-5b14-40ac-91cb-621a989462d0\") " Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.013715 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities" (OuterVolumeSpecName: "utilities") pod "75480777-5b14-40ac-91cb-621a989462d0" (UID: "75480777-5b14-40ac-91cb-621a989462d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.018540 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b" (OuterVolumeSpecName: "kube-api-access-sxb8b") pod "75480777-5b14-40ac-91cb-621a989462d0" (UID: "75480777-5b14-40ac-91cb-621a989462d0"). InnerVolumeSpecName "kube-api-access-sxb8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.069747 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75480777-5b14-40ac-91cb-621a989462d0" (UID: "75480777-5b14-40ac-91cb-621a989462d0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.114286 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.114673 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxb8b\" (UniqueName: \"kubernetes.io/projected/75480777-5b14-40ac-91cb-621a989462d0-kube-api-access-sxb8b\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.114745 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75480777-5b14-40ac-91cb-621a989462d0-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.765484 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pzt44" event={"ID":"75480777-5b14-40ac-91cb-621a989462d0","Type":"ContainerDied","Data":"4438a1d18e062cb54a4e70adab1273b6bb054cb4f058f7622919b91ee5977f2c"} Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.765572 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pzt44" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.766506 4869 scope.go:117] "RemoveContainer" containerID="421b5fc1f0d419f15d6a1d472f34ba72ad798d4c3cb8c517a96d17950f6decfa" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.786848 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-np8tv" podStartSLOduration=4.079453455 podStartE2EDuration="1m1.786824683s" podCreationTimestamp="2025-09-29 13:43:46 +0000 UTC" firstStartedPulling="2025-09-29 13:43:48.1690387 +0000 UTC m=+154.609683020" lastFinishedPulling="2025-09-29 13:44:45.876409928 +0000 UTC m=+212.317054248" observedRunningTime="2025-09-29 13:44:47.785057715 +0000 UTC m=+214.225702045" watchObservedRunningTime="2025-09-29 13:44:47.786824683 +0000 UTC m=+214.227469003" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.805081 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pzt44"] Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.822064 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4f8ds" Sep 29 13:44:47 crc kubenswrapper[4869]: I0929 13:44:47.823768 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pzt44"] Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.250340 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75480777-5b14-40ac-91cb-621a989462d0" path="/var/lib/kubelet/pods/75480777-5b14-40ac-91cb-621a989462d0/volumes" Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.330104 4869 scope.go:117] "RemoveContainer" containerID="0dad7404d79e53e1bf9b89b1bcd2c1fe954ef946297f2638cbfc7daed1bef9f1" Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.352805 4869 scope.go:117] "RemoveContainer" containerID="64a53bdee48065ef91aecd4386bb41a18869b6a135e7c13c1bdbe589974c0e64" Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.775099 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" 
event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerStarted","Data":"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e"} Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.777115 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerStarted","Data":"514c8d323470f4f011381e2a6dba4bc178839f2f2c66a03a49fae0a0e7343df8"} Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.795473 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4xdz4" podStartSLOduration=4.410142372 podStartE2EDuration="1m6.79544798s" podCreationTimestamp="2025-09-29 13:43:42 +0000 UTC" firstStartedPulling="2025-09-29 13:43:44.933868041 +0000 UTC m=+151.374512361" lastFinishedPulling="2025-09-29 13:44:47.319173659 +0000 UTC m=+213.759817969" observedRunningTime="2025-09-29 13:44:48.794057392 +0000 UTC m=+215.234701722" watchObservedRunningTime="2025-09-29 13:44:48.79544798 +0000 UTC m=+215.236092300" Sep 29 13:44:48 crc kubenswrapper[4869]: I0929 13:44:48.813239 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p56nh" podStartSLOduration=2.551480498 podStartE2EDuration="1m3.813211127s" podCreationTimestamp="2025-09-29 13:43:45 +0000 UTC" firstStartedPulling="2025-09-29 13:43:47.090982615 +0000 UTC m=+153.531626935" lastFinishedPulling="2025-09-29 13:44:48.352713244 +0000 UTC m=+214.793357564" observedRunningTime="2025-09-29 13:44:48.81223091 +0000 UTC m=+215.252875270" watchObservedRunningTime="2025-09-29 13:44:48.813211127 +0000 UTC m=+215.253855447" Sep 29 13:44:50 crc kubenswrapper[4869]: I0929 13:44:50.657004 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:44:50 crc kubenswrapper[4869]: I0929 13:44:50.657418 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:44:50 crc kubenswrapper[4869]: I0929 13:44:50.658022 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:44:50 crc kubenswrapper[4869]: I0929 13:44:50.658865 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 13:44:50 crc kubenswrapper[4869]: I0929 13:44:50.659008 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4" gracePeriod=600 Sep 29 13:44:51 crc 
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.478391 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"]
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.479010 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4f8ds" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="registry-server" containerID="cri-o://20385cc3b75e87573e339dda52114daeecd964edaf360daf61e09c0131e1c848" gracePeriod=2
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.799719 4869 generic.go:334] "Generic (PLEG): container finished" podID="7bc1173a-d614-4277-968b-4139f82c942d" containerID="20385cc3b75e87573e339dda52114daeecd964edaf360daf61e09c0131e1c848" exitCode=0
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.800282 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerDied","Data":"20385cc3b75e87573e339dda52114daeecd964edaf360daf61e09c0131e1c848"}
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.803661 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4" exitCode=0
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.803730 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4"}
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.803774 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df"}
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.837730 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4f8ds"
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.989014 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content\") pod \"7bc1173a-d614-4277-968b-4139f82c942d\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") "
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.989187 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmkh8\" (UniqueName: \"kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8\") pod \"7bc1173a-d614-4277-968b-4139f82c942d\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") "
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.989251 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities\") pod \"7bc1173a-d614-4277-968b-4139f82c942d\" (UID: \"7bc1173a-d614-4277-968b-4139f82c942d\") "
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.991395 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities" (OuterVolumeSpecName: "utilities") pod "7bc1173a-d614-4277-968b-4139f82c942d" (UID: "7bc1173a-d614-4277-968b-4139f82c942d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:44:51 crc kubenswrapper[4869]: I0929 13:44:51.995527 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8" (OuterVolumeSpecName: "kube-api-access-lmkh8") pod "7bc1173a-d614-4277-968b-4139f82c942d" (UID: "7bc1173a-d614-4277-968b-4139f82c942d"). InnerVolumeSpecName "kube-api-access-lmkh8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.078002 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bc1173a-d614-4277-968b-4139f82c942d" (UID: "7bc1173a-d614-4277-968b-4139f82c942d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.090644 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.090676 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmkh8\" (UniqueName: \"kubernetes.io/projected/7bc1173a-d614-4277-968b-4139f82c942d-kube-api-access-lmkh8\") on node \"crc\" DevicePath \"\""
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.090695 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc1173a-d614-4277-968b-4139f82c942d-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.813508 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4f8ds" event={"ID":"7bc1173a-d614-4277-968b-4139f82c942d","Type":"ContainerDied","Data":"49e640c9dd228cbbb8c2d392f7592cfda03bf9419d15e35c500439eeee52d4c2"}
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.813972 4869 scope.go:117] "RemoveContainer" containerID="20385cc3b75e87573e339dda52114daeecd964edaf360daf61e09c0131e1c848"
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.814026 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4f8ds"
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.843908 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"]
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.843970 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4f8ds"]
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.848977 4869 scope.go:117] "RemoveContainer" containerID="df9fd86690253d41ec224c3f50c8520773f28f0139c33aa5d5134c19ee41829e"
Sep 29 13:44:52 crc kubenswrapper[4869]: I0929 13:44:52.871051 4869 scope.go:117] "RemoveContainer" containerID="b1d922c82f75f0c7ae56f8b549e242f7de2525a453d26754dca95c669c0ad1a6"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.099509 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xgs6h"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.249295 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.250454 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.296270 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.701840 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r9gmm"
Sep 29 13:44:53 crc kubenswrapper[4869]: I0929 13:44:53.857966 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4xdz4"
Sep 29 13:44:54 crc kubenswrapper[4869]: I0929 13:44:54.251179 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir"
podUID="7bc1173a-d614-4277-968b-4139f82c942d" path="/var/lib/kubelet/pods/7bc1173a-d614-4277-968b-4139f82c942d/volumes" Sep 29 13:44:55 crc kubenswrapper[4869]: I0929 13:44:55.406442 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:55 crc kubenswrapper[4869]: I0929 13:44:55.406785 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:55 crc kubenswrapper[4869]: I0929 13:44:55.473639 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:55 crc kubenswrapper[4869]: I0929 13:44:55.872076 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.078753 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"] Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.079078 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r9gmm" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="registry-server" containerID="cri-o://ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9" gracePeriod=2 Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.460861 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.461441 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.478797 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r9gmm" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.515355 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.562702 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities\") pod \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.562765 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content\") pod \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.563231 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps9kr\" (UniqueName: \"kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr\") pod \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\" (UID: \"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e\") " Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.563897 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities" (OuterVolumeSpecName: "utilities") pod "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" (UID: "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.577090 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr" (OuterVolumeSpecName: "kube-api-access-ps9kr") pod "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" (UID: "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e"). InnerVolumeSpecName "kube-api-access-ps9kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.616875 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" (UID: "3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.665454 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps9kr\" (UniqueName: \"kubernetes.io/projected/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-kube-api-access-ps9kr\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.665505 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.665514 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.838334 4869 generic.go:334] "Generic (PLEG): container finished" podID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerID="ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9" exitCode=0 Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.839261 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9gmm" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.851833 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerDied","Data":"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9"} Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.852057 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9gmm" event={"ID":"3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e","Type":"ContainerDied","Data":"2f43ff58e44364991d2347d72565b06a4ce963fbd038aa91f58b0a5e5905dbd9"} Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.852134 4869 scope.go:117] "RemoveContainer" containerID="ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.895701 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"] Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.907506 4869 scope.go:117] "RemoveContainer" containerID="e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.907594 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.908236 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r9gmm"] Sep 29 13:44:56 crc kubenswrapper[4869]: I0929 13:44:56.939850 4869 scope.go:117] "RemoveContainer" containerID="b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.004120 4869 scope.go:117] "RemoveContainer" containerID="ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9" Sep 29 13:44:57 crc kubenswrapper[4869]: E0929 13:44:57.004949 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9\": container with ID starting with 
ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9 not found: ID does not exist" containerID="ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.005022 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9"} err="failed to get container status \"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9\": rpc error: code = NotFound desc = could not find container \"ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9\": container with ID starting with ee64dea67f1dd36c7232e80e7a329ca743179f73c2545886240b15f4aeee70b9 not found: ID does not exist" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.005050 4869 scope.go:117] "RemoveContainer" containerID="e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5" Sep 29 13:44:57 crc kubenswrapper[4869]: E0929 13:44:57.005258 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5\": container with ID starting with e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5 not found: ID does not exist" containerID="e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.005287 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5"} err="failed to get container status \"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5\": rpc error: code = NotFound desc = could not find container \"e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5\": container with ID starting with e8c1ee0c55cb3d919d12e93c557e56725466fa285a93568eb0c93fffc38106a5 not found: ID does not exist" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.005301 4869 scope.go:117] "RemoveContainer" containerID="b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601" Sep 29 13:44:57 crc kubenswrapper[4869]: E0929 13:44:57.005476 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601\": container with ID starting with b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601 not found: ID does not exist" containerID="b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601" Sep 29 13:44:57 crc kubenswrapper[4869]: I0929 13:44:57.005498 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601"} err="failed to get container status \"b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601\": rpc error: code = NotFound desc = could not find container \"b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601\": container with ID starting with b778d468f503371293f511486f87756738f97db9ee4c6bfb3d540c8da5b2b601 not found: ID does not exist" Sep 29 13:44:58 crc kubenswrapper[4869]: I0929 13:44:58.248204 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" path="/var/lib/kubelet/pods/3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e/volumes" Sep 29 13:44:58 crc kubenswrapper[4869]: I0929 13:44:58.476736 
4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:44:58 crc kubenswrapper[4869]: I0929 13:44:58.477021 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p56nh" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="registry-server" containerID="cri-o://514c8d323470f4f011381e2a6dba4bc178839f2f2c66a03a49fae0a0e7343df8" gracePeriod=2 Sep 29 13:44:58 crc kubenswrapper[4869]: I0929 13:44:58.853688 4869 generic.go:334] "Generic (PLEG): container finished" podID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerID="514c8d323470f4f011381e2a6dba4bc178839f2f2c66a03a49fae0a0e7343df8" exitCode=0 Sep 29 13:44:58 crc kubenswrapper[4869]: I0929 13:44:58.853737 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerDied","Data":"514c8d323470f4f011381e2a6dba4bc178839f2f2c66a03a49fae0a0e7343df8"} Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.000946 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.120874 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content\") pod \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.120992 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x9p9\" (UniqueName: \"kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9\") pod \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.121054 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities\") pod \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\" (UID: \"e05d5f70-7b18-422e-8862-a36aa7eb47ad\") " Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.122209 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities" (OuterVolumeSpecName: "utilities") pod "e05d5f70-7b18-422e-8862-a36aa7eb47ad" (UID: "e05d5f70-7b18-422e-8862-a36aa7eb47ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.128601 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9" (OuterVolumeSpecName: "kube-api-access-4x9p9") pod "e05d5f70-7b18-422e-8862-a36aa7eb47ad" (UID: "e05d5f70-7b18-422e-8862-a36aa7eb47ad"). InnerVolumeSpecName "kube-api-access-4x9p9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.135042 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e05d5f70-7b18-422e-8862-a36aa7eb47ad" (UID: "e05d5f70-7b18-422e-8862-a36aa7eb47ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.223513 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.223578 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x9p9\" (UniqueName: \"kubernetes.io/projected/e05d5f70-7b18-422e-8862-a36aa7eb47ad-kube-api-access-4x9p9\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.223601 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05d5f70-7b18-422e-8862-a36aa7eb47ad-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.860112 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p56nh" event={"ID":"e05d5f70-7b18-422e-8862-a36aa7eb47ad","Type":"ContainerDied","Data":"15e68da78f5390b86f95964f1b05ade20a8f1d27ca4fb9b45f92c2cde5335a91"} Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.860490 4869 scope.go:117] "RemoveContainer" containerID="514c8d323470f4f011381e2a6dba4bc178839f2f2c66a03a49fae0a0e7343df8" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.860179 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p56nh" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.878879 4869 scope.go:117] "RemoveContainer" containerID="faac586ab85917782c45ebab1bc0f07bfeb35b270bed72d5aca5f4993c4d8979" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.896557 4869 scope.go:117] "RemoveContainer" containerID="0241e307a1e9f7f89e9d66a5336b657f79ac3d8f29be6f9b06322db705cf425f" Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.899067 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:44:59 crc kubenswrapper[4869]: I0929 13:44:59.901467 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p56nh"] Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.141506 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g"] Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.141870 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142266 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142293 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142304 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142320 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142333 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142352 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142365 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142384 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef675a06-3b4a-4496-919a-cb6cc1b5d49d" containerName="pruner" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142394 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef675a06-3b4a-4496-919a-cb6cc1b5d49d" containerName="pruner" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142413 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142428 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142446 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="extract-content" Sep 29 
13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142458 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142479 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142490 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142504 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142513 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="extract-utilities" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142529 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142539 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142556 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142569 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142584 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142596 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc1173a-d614-4277-968b-4139f82c942d" containerName="extract-content" Sep 29 13:45:00 crc kubenswrapper[4869]: E0929 13:45:00.142640 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142652 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142832 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142850 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd7fbb9-5ee2-4f04-a10f-149ba9b4c42e" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142866 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="75480777-5b14-40ac-91cb-621a989462d0" containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142886 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef675a06-3b4a-4496-919a-cb6cc1b5d49d" containerName="pruner" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.142906 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bc1173a-d614-4277-968b-4139f82c942d" 
containerName="registry-server" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.143523 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.145941 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.146193 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.147572 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g"] Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.250751 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e05d5f70-7b18-422e-8862-a36aa7eb47ad" path="/var/lib/kubelet/pods/e05d5f70-7b18-422e-8862-a36aa7eb47ad/volumes" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.343797 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96kcm\" (UniqueName: \"kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.344312 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.344531 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.445571 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.445677 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96kcm\" (UniqueName: \"kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.445731 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume\") pod 
\"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.446495 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.452508 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.466220 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96kcm\" (UniqueName: \"kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm\") pod \"collect-profiles-29319225-g8k2g\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.493337 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:00 crc kubenswrapper[4869]: I0929 13:45:00.919952 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g"] Sep 29 13:45:00 crc kubenswrapper[4869]: W0929 13:45:00.931430 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod103e108b_41cc_4b2b_a550_55b3541e1614.slice/crio-4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687 WatchSource:0}: Error finding container 4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687: Status 404 returned error can't find the container with id 4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687 Sep 29 13:45:01 crc kubenswrapper[4869]: I0929 13:45:01.874649 4869 generic.go:334] "Generic (PLEG): container finished" podID="103e108b-41cc-4b2b-a550-55b3541e1614" containerID="fff2e67cb14f9c979bfeb1d00a517d02b3a67e402db23f9519c96fe6b36268fc" exitCode=0 Sep 29 13:45:01 crc kubenswrapper[4869]: I0929 13:45:01.874727 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" event={"ID":"103e108b-41cc-4b2b-a550-55b3541e1614","Type":"ContainerDied","Data":"fff2e67cb14f9c979bfeb1d00a517d02b3a67e402db23f9519c96fe6b36268fc"} Sep 29 13:45:01 crc kubenswrapper[4869]: I0929 13:45:01.874804 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" event={"ID":"103e108b-41cc-4b2b-a550-55b3541e1614","Type":"ContainerStarted","Data":"4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687"} Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.095191 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.291827 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume\") pod \"103e108b-41cc-4b2b-a550-55b3541e1614\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.291964 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume\") pod \"103e108b-41cc-4b2b-a550-55b3541e1614\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.292019 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96kcm\" (UniqueName: \"kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm\") pod \"103e108b-41cc-4b2b-a550-55b3541e1614\" (UID: \"103e108b-41cc-4b2b-a550-55b3541e1614\") " Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.292871 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume" (OuterVolumeSpecName: "config-volume") pod "103e108b-41cc-4b2b-a550-55b3541e1614" (UID: "103e108b-41cc-4b2b-a550-55b3541e1614"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.299542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "103e108b-41cc-4b2b-a550-55b3541e1614" (UID: "103e108b-41cc-4b2b-a550-55b3541e1614"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.303239 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm" (OuterVolumeSpecName: "kube-api-access-96kcm") pod "103e108b-41cc-4b2b-a550-55b3541e1614" (UID: "103e108b-41cc-4b2b-a550-55b3541e1614"). InnerVolumeSpecName "kube-api-access-96kcm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.393917 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103e108b-41cc-4b2b-a550-55b3541e1614-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.394307 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96kcm\" (UniqueName: \"kubernetes.io/projected/103e108b-41cc-4b2b-a550-55b3541e1614-kube-api-access-96kcm\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.394348 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/103e108b-41cc-4b2b-a550-55b3541e1614-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.887341 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" event={"ID":"103e108b-41cc-4b2b-a550-55b3541e1614","Type":"ContainerDied","Data":"4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687"} Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.887883 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c5ad9247d03dc3a137d78762c247bd5afe037e0357a49d50b5da2fa97570687" Sep 29 13:45:03 crc kubenswrapper[4869]: I0929 13:45:03.887393 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g" Sep 29 13:45:04 crc kubenswrapper[4869]: I0929 13:45:04.601214 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"] Sep 29 13:45:29 crc kubenswrapper[4869]: I0929 13:45:29.642826 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerName="oauth-openshift" containerID="cri-o://2bf7bd6d1b256e3a8e4816291d06ba3db5841deaafd383665211591316290166" gracePeriod=15 Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.044301 4869 generic.go:334] "Generic (PLEG): container finished" podID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerID="2bf7bd6d1b256e3a8e4816291d06ba3db5841deaafd383665211591316290166" exitCode=0 Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.044366 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" event={"ID":"d93f1bc3-9e18-4541-909a-7eb51a5fedd0","Type":"ContainerDied","Data":"2bf7bd6d1b256e3a8e4816291d06ba3db5841deaafd383665211591316290166"} Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.545629 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.589058 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-86d85988f6-k7wkl"] Sep 29 13:45:30 crc kubenswrapper[4869]: E0929 13:45:30.589730 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="103e108b-41cc-4b2b-a550-55b3541e1614" containerName="collect-profiles" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.589746 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="103e108b-41cc-4b2b-a550-55b3541e1614" containerName="collect-profiles" Sep 29 13:45:30 crc kubenswrapper[4869]: E0929 13:45:30.589769 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerName="oauth-openshift" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.589779 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerName="oauth-openshift" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.590174 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="103e108b-41cc-4b2b-a550-55b3541e1614" containerName="collect-profiles" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.590192 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" containerName="oauth-openshift" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.590988 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.607660 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-86d85988f6-k7wkl"] Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623164 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623217 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623314 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623340 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623361 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" 
(UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623397 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623438 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8krnx\" (UniqueName: \"kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623465 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623480 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623498 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623519 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623547 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623570 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623636 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle\") pod \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\" (UID: \"d93f1bc3-9e18-4541-909a-7eb51a5fedd0\") " Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623820 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623846 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-error\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623870 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623919 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623953 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-cliconfig\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.623981 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624008 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjgf8\" (UniqueName: \"kubernetes.io/projected/42cf4022-2628-4e51-be0e-882e1c831297-kube-api-access-zjgf8\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624052 4869 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-audit-policies\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624069 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-session\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624094 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-serving-cert\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624115 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-router-certs\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624134 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-service-ca\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624152 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-login\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.624172 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/42cf4022-2628-4e51-be0e-882e1c831297-audit-dir\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.625147 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.626903 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.626960 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.626880 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.626991 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.632571 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.632771 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx" (OuterVolumeSpecName: "kube-api-access-8krnx") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "kube-api-access-8krnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.636878 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.644682 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.645145 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.645665 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.645882 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.645986 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.646205 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "d93f1bc3-9e18-4541-909a-7eb51a5fedd0" (UID: "d93f1bc3-9e18-4541-909a-7eb51a5fedd0"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.725547 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726352 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjgf8\" (UniqueName: \"kubernetes.io/projected/42cf4022-2628-4e51-be0e-882e1c831297-kube-api-access-zjgf8\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726403 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-audit-policies\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726477 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-session\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726512 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-router-certs\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726531 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-serving-cert\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-service-ca\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726569 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-login\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 
13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726591 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/42cf4022-2628-4e51-be0e-882e1c831297-audit-dir\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726656 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726687 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-error\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726712 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726731 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726760 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-cliconfig\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726842 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726858 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726871 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-provider-selection\") 
on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726883 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726894 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8krnx\" (UniqueName: \"kubernetes.io/projected/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-kube-api-access-8krnx\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726929 4869 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-policies\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726938 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726948 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726962 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726973 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726983 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.726993 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.727003 4869 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.727015 4869 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d93f1bc3-9e18-4541-909a-7eb51a5fedd0-audit-dir\") on node \"crc\" DevicePath \"\"" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.727809 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-audit-policies\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: 
\"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.728099 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-cliconfig\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.728144 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/42cf4022-2628-4e51-be0e-882e1c831297-audit-dir\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.728966 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.729399 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-service-ca\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.729933 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.731302 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-serving-cert\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.731672 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.731939 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-session\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " 
pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.732322 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-error\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.732731 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-router-certs\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.732943 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-user-template-login\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.734336 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/42cf4022-2628-4e51-be0e-882e1c831297-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.745823 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjgf8\" (UniqueName: \"kubernetes.io/projected/42cf4022-2628-4e51-be0e-882e1c831297-kube-api-access-zjgf8\") pod \"oauth-openshift-86d85988f6-k7wkl\" (UID: \"42cf4022-2628-4e51-be0e-882e1c831297\") " pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:30 crc kubenswrapper[4869]: I0929 13:45:30.912279 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.052409 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" event={"ID":"d93f1bc3-9e18-4541-909a-7eb51a5fedd0","Type":"ContainerDied","Data":"140ef9b9e74ff6485d126291c14bc51a0bd82d87e952396ae9882bd5770ef31d"} Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.052480 4869 scope.go:117] "RemoveContainer" containerID="2bf7bd6d1b256e3a8e4816291d06ba3db5841deaafd383665211591316290166" Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.052646 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-7trbl" Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.093564 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"] Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.107841 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-7trbl"] Sep 29 13:45:31 crc kubenswrapper[4869]: I0929 13:45:31.160451 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-86d85988f6-k7wkl"] Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.059811 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" event={"ID":"42cf4022-2628-4e51-be0e-882e1c831297","Type":"ContainerStarted","Data":"c97fae5538211f1835d954ff5a08481b77abb70b7a778a75fe589962f733be47"} Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.060149 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" event={"ID":"42cf4022-2628-4e51-be0e-882e1c831297","Type":"ContainerStarted","Data":"3d276d1c8001ef7cbbd6f7c75527cc9ad98a240277912d9ea31e644388fc32df"} Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.060167 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.085288 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" podStartSLOduration=28.085254188 podStartE2EDuration="28.085254188s" podCreationTimestamp="2025-09-29 13:45:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:45:32.079846823 +0000 UTC m=+258.520491143" watchObservedRunningTime="2025-09-29 13:45:32.085254188 +0000 UTC m=+258.525898508" Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.249270 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d93f1bc3-9e18-4541-909a-7eb51a5fedd0" path="/var/lib/kubelet/pods/d93f1bc3-9e18-4541-909a-7eb51a5fedd0/volumes" Sep 29 13:45:32 crc kubenswrapper[4869]: I0929 13:45:32.256348 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-86d85988f6-k7wkl" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.696522 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.697748 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4xdz4" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="registry-server" containerID="cri-o://e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e" gracePeriod=30 Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.713813 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgs6h"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.714162 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xgs6h" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="registry-server" 
containerID="cri-o://9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71" gracePeriod=30 Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.728348 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.728984 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" containerID="cri-o://61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc" gracePeriod=30 Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.735752 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.736082 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tnwxz" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="registry-server" containerID="cri-o://865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f" gracePeriod=30 Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.745507 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tssgg"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.746528 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.748132 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.748457 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-np8tv" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="registry-server" containerID="cri-o://3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6" gracePeriod=30 Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.761294 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tssgg"] Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.850265 4869 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bwxhh container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.850380 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.872587 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 
crc kubenswrapper[4869]: I0929 13:46:45.872684 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kqqq\" (UniqueName: \"kubernetes.io/projected/22f29fa0-8b84-4865-b786-53dc0a324c3b-kube-api-access-7kqqq\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.872723 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.974703 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.974939 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kqqq\" (UniqueName: \"kubernetes.io/projected/22f29fa0-8b84-4865-b786-53dc0a324c3b-kube-api-access-7kqqq\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.975044 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.977585 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.988280 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/22f29fa0-8b84-4865-b786-53dc0a324c3b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:45 crc kubenswrapper[4869]: I0929 13:46:45.994894 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kqqq\" (UniqueName: \"kubernetes.io/projected/22f29fa0-8b84-4865-b786-53dc0a324c3b-kube-api-access-7kqqq\") pod \"marketplace-operator-79b997595-tssgg\" (UID: \"22f29fa0-8b84-4865-b786-53dc0a324c3b\") " pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.145270 4869 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.166836 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4xdz4" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.177702 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgs6h" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.195176 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.220434 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.221254 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280271 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2mzx\" (UniqueName: \"kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx\") pod \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280319 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxv6q\" (UniqueName: \"kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q\") pod \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280372 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr6k5\" (UniqueName: \"kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5\") pod \"209e6085-24df-4c1f-ba21-016aee31035b\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280473 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca\") pod \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280625 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content\") pod \"e983aa6b-2924-498a-a957-6dce64d318a0\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280689 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content\") pod \"24810bda-9398-4992-a422-e0196cd215d7\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280723 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content\") pod \"209e6085-24df-4c1f-ba21-016aee31035b\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280770 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content\") pod \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280825 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities\") pod \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\" (UID: \"a3c2054b-c189-4824-8fbf-e85af17c7ad1\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280854 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rzhk\" (UniqueName: \"kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk\") pod \"e983aa6b-2924-498a-a957-6dce64d318a0\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.280950 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities\") pod \"e983aa6b-2924-498a-a957-6dce64d318a0\" (UID: \"e983aa6b-2924-498a-a957-6dce64d318a0\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.281042 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics\") pod \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\" (UID: \"773d63e6-d34c-4320-8cab-c77b91b3c8b2\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.281076 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities\") pod \"209e6085-24df-4c1f-ba21-016aee31035b\" (UID: \"209e6085-24df-4c1f-ba21-016aee31035b\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.281106 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities\") pod \"24810bda-9398-4992-a422-e0196cd215d7\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.281157 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5mwz\" (UniqueName: \"kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz\") pod \"24810bda-9398-4992-a422-e0196cd215d7\" (UID: \"24810bda-9398-4992-a422-e0196cd215d7\") " Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.281481 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "773d63e6-d34c-4320-8cab-c77b91b3c8b2" (UID: "773d63e6-d34c-4320-8cab-c77b91b3c8b2"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.282335 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities" (OuterVolumeSpecName: "utilities") pod "e983aa6b-2924-498a-a957-6dce64d318a0" (UID: "e983aa6b-2924-498a-a957-6dce64d318a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.286329 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "773d63e6-d34c-4320-8cab-c77b91b3c8b2" (UID: "773d63e6-d34c-4320-8cab-c77b91b3c8b2"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.287408 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities" (OuterVolumeSpecName: "utilities") pod "209e6085-24df-4c1f-ba21-016aee31035b" (UID: "209e6085-24df-4c1f-ba21-016aee31035b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.288480 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities" (OuterVolumeSpecName: "utilities") pod "24810bda-9398-4992-a422-e0196cd215d7" (UID: "24810bda-9398-4992-a422-e0196cd215d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.289123 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx" (OuterVolumeSpecName: "kube-api-access-s2mzx") pod "773d63e6-d34c-4320-8cab-c77b91b3c8b2" (UID: "773d63e6-d34c-4320-8cab-c77b91b3c8b2"). InnerVolumeSpecName "kube-api-access-s2mzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.300805 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities" (OuterVolumeSpecName: "utilities") pod "a3c2054b-c189-4824-8fbf-e85af17c7ad1" (UID: "a3c2054b-c189-4824-8fbf-e85af17c7ad1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.304598 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk" (OuterVolumeSpecName: "kube-api-access-8rzhk") pod "e983aa6b-2924-498a-a957-6dce64d318a0" (UID: "e983aa6b-2924-498a-a957-6dce64d318a0"). InnerVolumeSpecName "kube-api-access-8rzhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.305381 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q" (OuterVolumeSpecName: "kube-api-access-hxv6q") pod "a3c2054b-c189-4824-8fbf-e85af17c7ad1" (UID: "a3c2054b-c189-4824-8fbf-e85af17c7ad1"). 
InnerVolumeSpecName "kube-api-access-hxv6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.312301 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5" (OuterVolumeSpecName: "kube-api-access-cr6k5") pod "209e6085-24df-4c1f-ba21-016aee31035b" (UID: "209e6085-24df-4c1f-ba21-016aee31035b"). InnerVolumeSpecName "kube-api-access-cr6k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.313683 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e983aa6b-2924-498a-a957-6dce64d318a0" (UID: "e983aa6b-2924-498a-a957-6dce64d318a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.330560 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz" (OuterVolumeSpecName: "kube-api-access-l5mwz") pod "24810bda-9398-4992-a422-e0196cd215d7" (UID: "24810bda-9398-4992-a422-e0196cd215d7"). InnerVolumeSpecName "kube-api-access-l5mwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.358391 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24810bda-9398-4992-a422-e0196cd215d7" (UID: "24810bda-9398-4992-a422-e0196cd215d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.363317 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3c2054b-c189-4824-8fbf-e85af17c7ad1" (UID: "a3c2054b-c189-4824-8fbf-e85af17c7ad1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382454 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382499 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3c2054b-c189-4824-8fbf-e85af17c7ad1-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382509 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rzhk\" (UniqueName: \"kubernetes.io/projected/e983aa6b-2924-498a-a957-6dce64d318a0-kube-api-access-8rzhk\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382521 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382531 4869 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382541 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382549 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382559 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5mwz\" (UniqueName: \"kubernetes.io/projected/24810bda-9398-4992-a422-e0196cd215d7-kube-api-access-l5mwz\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382568 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2mzx\" (UniqueName: \"kubernetes.io/projected/773d63e6-d34c-4320-8cab-c77b91b3c8b2-kube-api-access-s2mzx\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382576 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxv6q\" (UniqueName: \"kubernetes.io/projected/a3c2054b-c189-4824-8fbf-e85af17c7ad1-kube-api-access-hxv6q\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382585 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr6k5\" (UniqueName: \"kubernetes.io/projected/209e6085-24df-4c1f-ba21-016aee31035b-kube-api-access-cr6k5\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382594 4869 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/773d63e6-d34c-4320-8cab-c77b91b3c8b2-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382601 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/e983aa6b-2924-498a-a957-6dce64d318a0-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.382629 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24810bda-9398-4992-a422-e0196cd215d7-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.394924 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tssgg"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.413400 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "209e6085-24df-4c1f-ba21-016aee31035b" (UID: "209e6085-24df-4c1f-ba21-016aee31035b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.485059 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/209e6085-24df-4c1f-ba21-016aee31035b-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.516531 4869 generic.go:334] "Generic (PLEG): container finished" podID="209e6085-24df-4c1f-ba21-016aee31035b" containerID="3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6" exitCode=0 Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.516749 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerDied","Data":"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.517179 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np8tv" event={"ID":"209e6085-24df-4c1f-ba21-016aee31035b","Type":"ContainerDied","Data":"4d85a0ccefa09e8373a342b19ac86996189faa06cf9f8e0225267a62afef1c24"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.517205 4869 scope.go:117] "RemoveContainer" containerID="3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.516848 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np8tv" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.524542 4869 generic.go:334] "Generic (PLEG): container finished" podID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerID="e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e" exitCode=0 Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.524670 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4xdz4" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.524681 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerDied","Data":"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.524754 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4xdz4" event={"ID":"a3c2054b-c189-4824-8fbf-e85af17c7ad1","Type":"ContainerDied","Data":"5ef4fba5d8585b75535a652e785acf529f2e5036ba2b8f8822343f1363017d1a"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.527899 4869 generic.go:334] "Generic (PLEG): container finished" podID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerID="61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc" exitCode=0 Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.528235 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" event={"ID":"773d63e6-d34c-4320-8cab-c77b91b3c8b2","Type":"ContainerDied","Data":"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.528284 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" event={"ID":"773d63e6-d34c-4320-8cab-c77b91b3c8b2","Type":"ContainerDied","Data":"abfefd990c064113287c76a2ed1a9122276e679288ec949c8fdf814e1b40b35c"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.528989 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwxhh" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.537124 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" event={"ID":"22f29fa0-8b84-4865-b786-53dc0a324c3b","Type":"ContainerStarted","Data":"022afa351734ba44d59b0cf87ba0b698f53e19b3402cafc201c3449c4e50efce"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.539439 4869 scope.go:117] "RemoveContainer" containerID="e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.545269 4869 generic.go:334] "Generic (PLEG): container finished" podID="24810bda-9398-4992-a422-e0196cd215d7" containerID="9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71" exitCode=0 Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.545336 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xgs6h" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.545339 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerDied","Data":"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.545419 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgs6h" event={"ID":"24810bda-9398-4992-a422-e0196cd215d7","Type":"ContainerDied","Data":"6a72bdfffba642fe7c930b65c1ebb9c88c4892b12cbdd30aa8d9f03d357b4ba0"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.558661 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.559715 4869 generic.go:334] "Generic (PLEG): container finished" podID="e983aa6b-2924-498a-a957-6dce64d318a0" containerID="865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f" exitCode=0 Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.559775 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerDied","Data":"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.559803 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tnwxz" event={"ID":"e983aa6b-2924-498a-a957-6dce64d318a0","Type":"ContainerDied","Data":"950ffd415b7a293d8cd02118c8ba01fd362033ac1f77800087ae8e14684a0180"} Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.559915 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tnwxz" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.561056 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-np8tv"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.586999 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.592112 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4xdz4"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.594974 4869 scope.go:117] "RemoveContainer" containerID="20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.605427 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.616198 4869 scope.go:117] "RemoveContainer" containerID="3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.616826 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6\": container with ID starting with 3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6 not found: ID does not exist" containerID="3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.616871 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6"} err="failed to get container status \"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6\": rpc error: code = NotFound desc = could not find container \"3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6\": container with ID starting with 3ad0d89539017750701778b54afb2dc5ce1d770795818fcc5e1d7394098e70a6 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.616904 4869 scope.go:117] "RemoveContainer" containerID="e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.617231 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwxhh"] Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.617528 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a\": container with ID starting with e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a not found: ID does not exist" containerID="e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.617554 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a"} err="failed to get container status \"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a\": rpc error: code = NotFound desc = could not find container \"e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a\": container with ID starting with 
e8155a7ddd3a9a23b4c9baa6395e263e0d8bf61523dafbc1c0d8aef48f255a5a not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.617572 4869 scope.go:117] "RemoveContainer" containerID="20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.617914 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d\": container with ID starting with 20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d not found: ID does not exist" containerID="20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.617949 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d"} err="failed to get container status \"20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d\": rpc error: code = NotFound desc = could not find container \"20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d\": container with ID starting with 20157b7be02b45c46a5da017362f8388f594dfaca9b9899ac74a3cc34d98a13d not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.617972 4869 scope.go:117] "RemoveContainer" containerID="e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.624942 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgs6h"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.628367 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xgs6h"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.631295 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.632335 4869 scope.go:117] "RemoveContainer" containerID="e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.635229 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tnwxz"] Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.647361 4869 scope.go:117] "RemoveContainer" containerID="25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.662667 4869 scope.go:117] "RemoveContainer" containerID="e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.663072 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e\": container with ID starting with e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e not found: ID does not exist" containerID="e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.663105 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e"} err="failed to get container status \"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e\": rpc error: 
code = NotFound desc = could not find container \"e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e\": container with ID starting with e476954630cba1f5e6c88a123dfb9bf4f7159d0277fc006b58fb1194a662588e not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.663140 4869 scope.go:117] "RemoveContainer" containerID="e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.663567 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629\": container with ID starting with e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629 not found: ID does not exist" containerID="e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.663630 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629"} err="failed to get container status \"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629\": rpc error: code = NotFound desc = could not find container \"e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629\": container with ID starting with e6d87ffcc9709d7aff637ec56fc93f1d9776aa97caf84468555d109b047eb629 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.663667 4869 scope.go:117] "RemoveContainer" containerID="25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.663999 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526\": container with ID starting with 25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526 not found: ID does not exist" containerID="25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.664030 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526"} err="failed to get container status \"25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526\": rpc error: code = NotFound desc = could not find container \"25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526\": container with ID starting with 25398956858a4616c507aca6d25d8e62dc15cfba2817ae347579f094792f3526 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.664047 4869 scope.go:117] "RemoveContainer" containerID="61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.677953 4869 scope.go:117] "RemoveContainer" containerID="61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.679112 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc\": container with ID starting with 61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc not found: ID does not exist" containerID="61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc" Sep 29 
13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.679148 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc"} err="failed to get container status \"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc\": rpc error: code = NotFound desc = could not find container \"61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc\": container with ID starting with 61a7c7f01c45a2e499881cd6e25750956dbba4736db7e54e6bc1defa1130f9dc not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.679168 4869 scope.go:117] "RemoveContainer" containerID="9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.696129 4869 scope.go:117] "RemoveContainer" containerID="c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.716635 4869 scope.go:117] "RemoveContainer" containerID="34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.733285 4869 scope.go:117] "RemoveContainer" containerID="9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.734044 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71\": container with ID starting with 9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71 not found: ID does not exist" containerID="9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.734086 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71"} err="failed to get container status \"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71\": rpc error: code = NotFound desc = could not find container \"9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71\": container with ID starting with 9f2736288cea3af4705abfce8ef1e050d5cf3f0068fc2a571e93c949ab688f71 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.734128 4869 scope.go:117] "RemoveContainer" containerID="c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.734798 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8\": container with ID starting with c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8 not found: ID does not exist" containerID="c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.734849 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8"} err="failed to get container status \"c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8\": rpc error: code = NotFound desc = could not find container \"c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8\": container with ID starting with 
c0cd4f6869a4c22c54229f9ae5be99e09419c0531f16ef95d4c9ece9a1bd42e8 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.734868 4869 scope.go:117] "RemoveContainer" containerID="34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.735812 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08\": container with ID starting with 34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08 not found: ID does not exist" containerID="34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.735843 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08"} err="failed to get container status \"34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08\": rpc error: code = NotFound desc = could not find container \"34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08\": container with ID starting with 34c583fe2394e0838f856d9e35f4b8c13560488054bce4352d9e3b06b5846d08 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.735864 4869 scope.go:117] "RemoveContainer" containerID="865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.752520 4869 scope.go:117] "RemoveContainer" containerID="f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.770237 4869 scope.go:117] "RemoveContainer" containerID="4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.784929 4869 scope.go:117] "RemoveContainer" containerID="865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.785767 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f\": container with ID starting with 865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f not found: ID does not exist" containerID="865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.785805 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f"} err="failed to get container status \"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f\": rpc error: code = NotFound desc = could not find container \"865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f\": container with ID starting with 865e968455bee0221fe0072cf7406ba5f7101c50195eaf334358b32ac4e2a75f not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.785832 4869 scope.go:117] "RemoveContainer" containerID="f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.786295 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652\": container 
with ID starting with f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652 not found: ID does not exist" containerID="f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.786328 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652"} err="failed to get container status \"f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652\": rpc error: code = NotFound desc = could not find container \"f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652\": container with ID starting with f102a5e977aae8d7f777025249f55b66fa3fb191020bcdda541fdca7f8a2d652 not found: ID does not exist" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.786352 4869 scope.go:117] "RemoveContainer" containerID="4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321" Sep 29 13:46:46 crc kubenswrapper[4869]: E0929 13:46:46.786763 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321\": container with ID starting with 4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321 not found: ID does not exist" containerID="4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321" Sep 29 13:46:46 crc kubenswrapper[4869]: I0929 13:46:46.786850 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321"} err="failed to get container status \"4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321\": rpc error: code = NotFound desc = could not find container \"4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321\": container with ID starting with 4aed63796c87d6c12ddd1aae5cc4ff32afe670137c599f714fc89b02ada3f321 not found: ID does not exist" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.578271 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" event={"ID":"22f29fa0-8b84-4865-b786-53dc0a324c3b","Type":"ContainerStarted","Data":"5a24a9fbc63bfcf2d3a59534b19e9bc4b5b6f5dedb5429f7e9b121076783ca12"} Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.578599 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.581440 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.595126 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-tssgg" podStartSLOduration=2.595103667 podStartE2EDuration="2.595103667s" podCreationTimestamp="2025-09-29 13:46:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:46:47.591841946 +0000 UTC m=+334.032486286" watchObservedRunningTime="2025-09-29 13:46:47.595103667 +0000 UTC m=+334.035747987" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.916409 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fffjd"] Sep 29 13:46:47 crc 
kubenswrapper[4869]: E0929 13:46:47.916966 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.916978 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.916988 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.916995 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917024 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917031 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917041 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917047 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917059 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917065 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917077 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917085 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917092 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917098 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917105 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917111 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917119 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917124 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" 
containerName="extract-utilities" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917131 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917137 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="extract-content" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917151 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917158 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917166 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917171 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: E0929 13:46:47.917181 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917186 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917273 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917284 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="24810bda-9398-4992-a422-e0196cd215d7" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917296 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="209e6085-24df-4c1f-ba21-016aee31035b" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917324 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" containerName="marketplace-operator" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.917333 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" containerName="registry-server" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.918700 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.924359 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fffjd"] Sep 29 13:46:47 crc kubenswrapper[4869]: I0929 13:46:47.924823 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.003386 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.003449 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwnp6\" (UniqueName: \"kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.003506 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.104386 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.104462 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwnp6\" (UniqueName: \"kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.104535 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.105025 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.105099 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content\") pod \"certified-operators-fffjd\" (UID: 
\"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.117649 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7797k"] Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.121118 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.123826 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.136278 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7797k"] Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.142743 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwnp6\" (UniqueName: \"kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6\") pod \"certified-operators-fffjd\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") " pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.206211 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-utilities\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.206281 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xlkn\" (UniqueName: \"kubernetes.io/projected/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-kube-api-access-7xlkn\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.206331 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-catalog-content\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.247978 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="209e6085-24df-4c1f-ba21-016aee31035b" path="/var/lib/kubelet/pods/209e6085-24df-4c1f-ba21-016aee31035b/volumes" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.248886 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24810bda-9398-4992-a422-e0196cd215d7" path="/var/lib/kubelet/pods/24810bda-9398-4992-a422-e0196cd215d7/volumes" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.249705 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="773d63e6-d34c-4320-8cab-c77b91b3c8b2" path="/var/lib/kubelet/pods/773d63e6-d34c-4320-8cab-c77b91b3c8b2/volumes" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.250828 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3c2054b-c189-4824-8fbf-e85af17c7ad1" path="/var/lib/kubelet/pods/a3c2054b-c189-4824-8fbf-e85af17c7ad1/volumes" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.251532 4869 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e983aa6b-2924-498a-a957-6dce64d318a0" path="/var/lib/kubelet/pods/e983aa6b-2924-498a-a957-6dce64d318a0/volumes" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.259364 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.307421 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-utilities\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.307470 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xlkn\" (UniqueName: \"kubernetes.io/projected/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-kube-api-access-7xlkn\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.307522 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-catalog-content\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.308008 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-catalog-content\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.308000 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-utilities\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.328167 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xlkn\" (UniqueName: \"kubernetes.io/projected/0b98faa6-4adb-4767-adb6-504b9a6e2eb7-kube-api-access-7xlkn\") pod \"community-operators-7797k\" (UID: \"0b98faa6-4adb-4767-adb6-504b9a6e2eb7\") " pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.456365 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fffjd"] Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.462835 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:48 crc kubenswrapper[4869]: W0929 13:46:48.464366 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5db434b8_32b9_4401_aac4_2865a87bfdb1.slice/crio-63d44e317d50e6b7f528e7a9db98e74fa9befed38a22832b07481be1095f6e94 WatchSource:0}: Error finding container 63d44e317d50e6b7f528e7a9db98e74fa9befed38a22832b07481be1095f6e94: Status 404 returned error can't find the container with id 63d44e317d50e6b7f528e7a9db98e74fa9befed38a22832b07481be1095f6e94 Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.587296 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerStarted","Data":"63d44e317d50e6b7f528e7a9db98e74fa9befed38a22832b07481be1095f6e94"} Sep 29 13:46:48 crc kubenswrapper[4869]: I0929 13:46:48.669056 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7797k"] Sep 29 13:46:48 crc kubenswrapper[4869]: W0929 13:46:48.732587 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b98faa6_4adb_4767_adb6_504b9a6e2eb7.slice/crio-21f064bfbbbecba46fe5230aec93d6fd7d838d13db6b665186e8dee65ef1191d WatchSource:0}: Error finding container 21f064bfbbbecba46fe5230aec93d6fd7d838d13db6b665186e8dee65ef1191d: Status 404 returned error can't find the container with id 21f064bfbbbecba46fe5230aec93d6fd7d838d13db6b665186e8dee65ef1191d Sep 29 13:46:49 crc kubenswrapper[4869]: I0929 13:46:49.594276 4869 generic.go:334] "Generic (PLEG): container finished" podID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerID="d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79" exitCode=0 Sep 29 13:46:49 crc kubenswrapper[4869]: I0929 13:46:49.594594 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerDied","Data":"d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79"} Sep 29 13:46:49 crc kubenswrapper[4869]: I0929 13:46:49.596523 4869 generic.go:334] "Generic (PLEG): container finished" podID="0b98faa6-4adb-4767-adb6-504b9a6e2eb7" containerID="d37ebb86292989ed9053138417b8cc71c59a337fca44e123678e7841b3f02f0b" exitCode=0 Sep 29 13:46:49 crc kubenswrapper[4869]: I0929 13:46:49.597022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7797k" event={"ID":"0b98faa6-4adb-4767-adb6-504b9a6e2eb7","Type":"ContainerDied","Data":"d37ebb86292989ed9053138417b8cc71c59a337fca44e123678e7841b3f02f0b"} Sep 29 13:46:49 crc kubenswrapper[4869]: I0929 13:46:49.597053 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7797k" event={"ID":"0b98faa6-4adb-4767-adb6-504b9a6e2eb7","Type":"ContainerStarted","Data":"21f064bfbbbecba46fe5230aec93d6fd7d838d13db6b665186e8dee65ef1191d"} Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.317711 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zptkp"] Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.319135 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.329961 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.335008 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zptkp"] Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.439225 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-utilities\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.439298 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkv5w\" (UniqueName: \"kubernetes.io/projected/c6d1c827-7199-4858-a9ae-1515c38a2e57-kube-api-access-zkv5w\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.439338 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-catalog-content\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.518539 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5t5v2"] Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.519920 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.522287 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.529249 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5t5v2"] Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.541748 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-utilities\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.541856 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkv5w\" (UniqueName: \"kubernetes.io/projected/c6d1c827-7199-4858-a9ae-1515c38a2e57-kube-api-access-zkv5w\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.541895 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-catalog-content\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.542403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-catalog-content\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.543089 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6d1c827-7199-4858-a9ae-1515c38a2e57-utilities\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.564702 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkv5w\" (UniqueName: \"kubernetes.io/projected/c6d1c827-7199-4858-a9ae-1515c38a2e57-kube-api-access-zkv5w\") pod \"redhat-marketplace-zptkp\" (UID: \"c6d1c827-7199-4858-a9ae-1515c38a2e57\") " pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.607574 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7797k" event={"ID":"0b98faa6-4adb-4767-adb6-504b9a6e2eb7","Type":"ContainerStarted","Data":"f2092dde86927529b6eec5f926ccff32d86a3c09cef8ec6a27e7dce21a72f5d2"} Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.609598 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerStarted","Data":"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"} Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.643399 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-catalog-content\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.643461 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw492\" (UniqueName: \"kubernetes.io/projected/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-kube-api-access-mw492\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.643495 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-utilities\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.660414 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.745379 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-utilities\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.745805 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-catalog-content\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.745839 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw492\" (UniqueName: \"kubernetes.io/projected/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-kube-api-access-mw492\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.746304 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-utilities\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.746531 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-catalog-content\") pod \"redhat-operators-5t5v2\" (UID: \"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.778427 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw492\" (UniqueName: \"kubernetes.io/projected/2b1e4d3b-67b6-473f-bda1-b2fd0f253f97-kube-api-access-mw492\") pod \"redhat-operators-5t5v2\" (UID: 
\"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97\") " pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.842495 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:46:50 crc kubenswrapper[4869]: I0929 13:46:50.883267 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zptkp"] Sep 29 13:46:50 crc kubenswrapper[4869]: W0929 13:46:50.889299 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6d1c827_7199_4858_a9ae_1515c38a2e57.slice/crio-c1f5bffc5d2ebc198cdd0f9d794a29b0777569189b29a4d895f2e7dd8f1c76e1 WatchSource:0}: Error finding container c1f5bffc5d2ebc198cdd0f9d794a29b0777569189b29a4d895f2e7dd8f1c76e1: Status 404 returned error can't find the container with id c1f5bffc5d2ebc198cdd0f9d794a29b0777569189b29a4d895f2e7dd8f1c76e1 Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.042156 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5t5v2"] Sep 29 13:46:51 crc kubenswrapper[4869]: W0929 13:46:51.114151 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b1e4d3b_67b6_473f_bda1_b2fd0f253f97.slice/crio-5e9c240916e642cf0626709e6306558b579ee22e33a1309b6ff404a857b192ba WatchSource:0}: Error finding container 5e9c240916e642cf0626709e6306558b579ee22e33a1309b6ff404a857b192ba: Status 404 returned error can't find the container with id 5e9c240916e642cf0626709e6306558b579ee22e33a1309b6ff404a857b192ba Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.619317 4869 generic.go:334] "Generic (PLEG): container finished" podID="2b1e4d3b-67b6-473f-bda1-b2fd0f253f97" containerID="85f2bafda2bded9c4cc50e800d553abd72a95cb10e1bf71084cc7f4cf30165a0" exitCode=0 Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.619388 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t5v2" event={"ID":"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97","Type":"ContainerDied","Data":"85f2bafda2bded9c4cc50e800d553abd72a95cb10e1bf71084cc7f4cf30165a0"} Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.619739 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t5v2" event={"ID":"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97","Type":"ContainerStarted","Data":"5e9c240916e642cf0626709e6306558b579ee22e33a1309b6ff404a857b192ba"} Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.622986 4869 generic.go:334] "Generic (PLEG): container finished" podID="0b98faa6-4adb-4767-adb6-504b9a6e2eb7" containerID="f2092dde86927529b6eec5f926ccff32d86a3c09cef8ec6a27e7dce21a72f5d2" exitCode=0 Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.623050 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7797k" event={"ID":"0b98faa6-4adb-4767-adb6-504b9a6e2eb7","Type":"ContainerDied","Data":"f2092dde86927529b6eec5f926ccff32d86a3c09cef8ec6a27e7dce21a72f5d2"} Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.626577 4869 generic.go:334] "Generic (PLEG): container finished" podID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerID="65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1" exitCode=0 Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.626645 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerDied","Data":"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"} Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.630928 4869 generic.go:334] "Generic (PLEG): container finished" podID="c6d1c827-7199-4858-a9ae-1515c38a2e57" containerID="31e4c2e3358de85e160821c60a27fbfecfff3f8411885faa374503ba06b1f5e8" exitCode=0 Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.630969 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zptkp" event={"ID":"c6d1c827-7199-4858-a9ae-1515c38a2e57","Type":"ContainerDied","Data":"31e4c2e3358de85e160821c60a27fbfecfff3f8411885faa374503ba06b1f5e8"} Sep 29 13:46:51 crc kubenswrapper[4869]: I0929 13:46:51.631001 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zptkp" event={"ID":"c6d1c827-7199-4858-a9ae-1515c38a2e57","Type":"ContainerStarted","Data":"c1f5bffc5d2ebc198cdd0f9d794a29b0777569189b29a4d895f2e7dd8f1c76e1"} Sep 29 13:46:52 crc kubenswrapper[4869]: I0929 13:46:52.660175 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7797k" event={"ID":"0b98faa6-4adb-4767-adb6-504b9a6e2eb7","Type":"ContainerStarted","Data":"f40e949d8f420671fca091cf2aae510f4e3f3d80be6a56e79f653bf5c2ff2d85"} Sep 29 13:46:52 crc kubenswrapper[4869]: I0929 13:46:52.663022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerStarted","Data":"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"} Sep 29 13:46:52 crc kubenswrapper[4869]: I0929 13:46:52.688903 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7797k" podStartSLOduration=2.227369519 podStartE2EDuration="4.688883723s" podCreationTimestamp="2025-09-29 13:46:48 +0000 UTC" firstStartedPulling="2025-09-29 13:46:49.597746513 +0000 UTC m=+336.038390833" lastFinishedPulling="2025-09-29 13:46:52.059260717 +0000 UTC m=+338.499905037" observedRunningTime="2025-09-29 13:46:52.688133542 +0000 UTC m=+339.128777862" watchObservedRunningTime="2025-09-29 13:46:52.688883723 +0000 UTC m=+339.129528043" Sep 29 13:46:52 crc kubenswrapper[4869]: I0929 13:46:52.709622 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fffjd" podStartSLOduration=3.192811327 podStartE2EDuration="5.709585805s" podCreationTimestamp="2025-09-29 13:46:47 +0000 UTC" firstStartedPulling="2025-09-29 13:46:49.596064375 +0000 UTC m=+336.036708705" lastFinishedPulling="2025-09-29 13:46:52.112838863 +0000 UTC m=+338.553483183" observedRunningTime="2025-09-29 13:46:52.709374169 +0000 UTC m=+339.150018489" watchObservedRunningTime="2025-09-29 13:46:52.709585805 +0000 UTC m=+339.150230125" Sep 29 13:46:54 crc kubenswrapper[4869]: I0929 13:46:54.678396 4869 generic.go:334] "Generic (PLEG): container finished" podID="2b1e4d3b-67b6-473f-bda1-b2fd0f253f97" containerID="f25f859fe11891574f22bff6544d335d976c19954dae204d99b1b7262877f73a" exitCode=0 Sep 29 13:46:54 crc kubenswrapper[4869]: I0929 13:46:54.678468 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t5v2" 
event={"ID":"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97","Type":"ContainerDied","Data":"f25f859fe11891574f22bff6544d335d976c19954dae204d99b1b7262877f73a"} Sep 29 13:46:54 crc kubenswrapper[4869]: I0929 13:46:54.683800 4869 generic.go:334] "Generic (PLEG): container finished" podID="c6d1c827-7199-4858-a9ae-1515c38a2e57" containerID="2da878074f5940b22ae89294fcd10be43f7067feea0e28499f637499787abeef" exitCode=0 Sep 29 13:46:54 crc kubenswrapper[4869]: I0929 13:46:54.683974 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zptkp" event={"ID":"c6d1c827-7199-4858-a9ae-1515c38a2e57","Type":"ContainerDied","Data":"2da878074f5940b22ae89294fcd10be43f7067feea0e28499f637499787abeef"} Sep 29 13:46:55 crc kubenswrapper[4869]: I0929 13:46:55.692087 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t5v2" event={"ID":"2b1e4d3b-67b6-473f-bda1-b2fd0f253f97","Type":"ContainerStarted","Data":"a4b7012348104db7162012c89402c5ecd2c257adb753ad6e7876b342d490edb8"} Sep 29 13:46:55 crc kubenswrapper[4869]: I0929 13:46:55.694423 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zptkp" event={"ID":"c6d1c827-7199-4858-a9ae-1515c38a2e57","Type":"ContainerStarted","Data":"91e08be6f6d5872957bc2c72bf24d19bb5085d208a17101843697648bc297dbd"} Sep 29 13:46:55 crc kubenswrapper[4869]: I0929 13:46:55.712829 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5t5v2" podStartSLOduration=2.266023869 podStartE2EDuration="5.712800764s" podCreationTimestamp="2025-09-29 13:46:50 +0000 UTC" firstStartedPulling="2025-09-29 13:46:51.623585392 +0000 UTC m=+338.064229712" lastFinishedPulling="2025-09-29 13:46:55.070362287 +0000 UTC m=+341.511006607" observedRunningTime="2025-09-29 13:46:55.708063831 +0000 UTC m=+342.148708161" watchObservedRunningTime="2025-09-29 13:46:55.712800764 +0000 UTC m=+342.153445084" Sep 29 13:46:55 crc kubenswrapper[4869]: I0929 13:46:55.734527 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zptkp" podStartSLOduration=2.253885558 podStartE2EDuration="5.734495954s" podCreationTimestamp="2025-09-29 13:46:50 +0000 UTC" firstStartedPulling="2025-09-29 13:46:51.632503663 +0000 UTC m=+338.073147983" lastFinishedPulling="2025-09-29 13:46:55.113114059 +0000 UTC m=+341.553758379" observedRunningTime="2025-09-29 13:46:55.73399317 +0000 UTC m=+342.174637520" watchObservedRunningTime="2025-09-29 13:46:55.734495954 +0000 UTC m=+342.175140274" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.260357 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.260422 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.302210 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.463392 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.463671 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.499898 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.752331 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7797k" Sep 29 13:46:58 crc kubenswrapper[4869]: I0929 13:46:58.756197 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fffjd" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.660833 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.661206 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.713652 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.768181 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zptkp" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.843639 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.843701 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:47:00 crc kubenswrapper[4869]: I0929 13:47:00.888281 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:47:01 crc kubenswrapper[4869]: I0929 13:47:01.773303 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5t5v2" Sep 29 13:47:20 crc kubenswrapper[4869]: I0929 13:47:20.657801 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:47:20 crc kubenswrapper[4869]: I0929 13:47:20.658847 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:47:50 crc kubenswrapper[4869]: I0929 13:47:50.657559 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:47:50 crc kubenswrapper[4869]: I0929 13:47:50.658703 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:48:20 crc kubenswrapper[4869]: I0929 13:48:20.656788 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:48:20 crc kubenswrapper[4869]: I0929 13:48:20.657458 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:48:20 crc kubenswrapper[4869]: I0929 13:48:20.657537 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:48:20 crc kubenswrapper[4869]: I0929 13:48:20.658296 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 13:48:20 crc kubenswrapper[4869]: I0929 13:48:20.658361 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df" gracePeriod=600 Sep 29 13:48:21 crc kubenswrapper[4869]: I0929 13:48:21.231919 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df" exitCode=0 Sep 29 13:48:21 crc kubenswrapper[4869]: I0929 13:48:21.232031 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df"} Sep 29 13:48:21 crc kubenswrapper[4869]: I0929 13:48:21.232370 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9"} Sep 29 13:48:21 crc kubenswrapper[4869]: I0929 13:48:21.232400 4869 scope.go:117] "RemoveContainer" containerID="b7c70cfab78ba11720ed22e22f8ff745bbbc75e804281bdc4df9f73bc8895cb4" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.272672 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rhpxf"] Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.274668 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.358707 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rhpxf"] Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390002 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390078 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-registry-tls\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390120 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b5363e61-0600-418b-ae73-d02611f39e10-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390143 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b5363e61-0600-418b-ae73-d02611f39e10-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390185 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-registry-certificates\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390206 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-bound-sa-token\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390225 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5pb9\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-kube-api-access-x5pb9\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.390247 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-trusted-ca\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.421520 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.491902 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-registry-tls\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492349 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b5363e61-0600-418b-ae73-d02611f39e10-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492508 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b5363e61-0600-418b-ae73-d02611f39e10-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492665 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-registry-certificates\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492766 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-bound-sa-token\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492853 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5pb9\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-kube-api-access-x5pb9\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.492926 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-trusted-ca\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.496101 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b5363e61-0600-418b-ae73-d02611f39e10-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.502535 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-registry-certificates\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.504900 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-registry-tls\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.504906 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b5363e61-0600-418b-ae73-d02611f39e10-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.506554 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b5363e61-0600-418b-ae73-d02611f39e10-trusted-ca\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.512906 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5pb9\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-kube-api-access-x5pb9\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.513898 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5363e61-0600-418b-ae73-d02611f39e10-bound-sa-token\") pod \"image-registry-66df7c8f76-rhpxf\" (UID: \"b5363e61-0600-418b-ae73-d02611f39e10\") " pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.602150 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:46 crc kubenswrapper[4869]: I0929 13:48:46.817515 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rhpxf"] Sep 29 13:48:47 crc kubenswrapper[4869]: I0929 13:48:47.391700 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" event={"ID":"b5363e61-0600-418b-ae73-d02611f39e10","Type":"ContainerStarted","Data":"15b56c74dddb088cabdbd56b8908d0690efcb04e92dbf70dba666d96a2296358"} Sep 29 13:48:47 crc kubenswrapper[4869]: I0929 13:48:47.391798 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" event={"ID":"b5363e61-0600-418b-ae73-d02611f39e10","Type":"ContainerStarted","Data":"185ed0925353f1851462ea97f1b3889499a7004c4d76ddbf9c42d35272b58b92"} Sep 29 13:48:47 crc kubenswrapper[4869]: I0929 13:48:47.391918 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:48:47 crc kubenswrapper[4869]: I0929 13:48:47.471110 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" podStartSLOduration=1.47107417 podStartE2EDuration="1.47107417s" podCreationTimestamp="2025-09-29 13:48:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:48:47.467119732 +0000 UTC m=+453.907764052" watchObservedRunningTime="2025-09-29 13:48:47.47107417 +0000 UTC m=+453.911718490" Sep 29 13:49:06 crc kubenswrapper[4869]: I0929 13:49:06.607152 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-rhpxf" Sep 29 13:49:06 crc kubenswrapper[4869]: I0929 13:49:06.680385 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"] Sep 29 13:49:31 crc kubenswrapper[4869]: I0929 13:49:31.723122 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" podUID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" containerName="registry" containerID="cri-o://b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9" gracePeriod=30 Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.091570 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.149976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150043 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150090 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150138 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150186 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdwsm\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150241 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150406 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.150441 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates\") pod \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\" (UID: \"a39dcbb0-84e5-458c-9a0a-6d3388f423df\") " Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.151658 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.151818 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.158629 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.160874 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.162128 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.164684 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.169940 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm" (OuterVolumeSpecName: "kube-api-access-sdwsm") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "kube-api-access-sdwsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.178630 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "a39dcbb0-84e5-458c-9a0a-6d3388f423df" (UID: "a39dcbb0-84e5-458c-9a0a-6d3388f423df"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252146 4869 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a39dcbb0-84e5-458c-9a0a-6d3388f423df-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252182 4869 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-certificates\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252193 4869 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-bound-sa-token\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252204 4869 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-registry-tls\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252214 4869 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a39dcbb0-84e5-458c-9a0a-6d3388f423df-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252222 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a39dcbb0-84e5-458c-9a0a-6d3388f423df-trusted-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.252231 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdwsm\" (UniqueName: \"kubernetes.io/projected/a39dcbb0-84e5-458c-9a0a-6d3388f423df-kube-api-access-sdwsm\") on node \"crc\" DevicePath \"\"" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.684303 4869 generic.go:334] "Generic (PLEG): container finished" podID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" containerID="b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9" exitCode=0 Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.684377 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" event={"ID":"a39dcbb0-84e5-458c-9a0a-6d3388f423df","Type":"ContainerDied","Data":"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9"} Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.684416 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.684466 4869 scope.go:117] "RemoveContainer" containerID="b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.684433 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cl5ps" event={"ID":"a39dcbb0-84e5-458c-9a0a-6d3388f423df","Type":"ContainerDied","Data":"e11ea99a75745a39a923c099f96585d31380e451f13c59fd707d5494e7f5e992"} Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.722666 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"] Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.726933 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cl5ps"] Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.737065 4869 scope.go:117] "RemoveContainer" containerID="b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9" Sep 29 13:49:32 crc kubenswrapper[4869]: E0929 13:49:32.737989 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9\": container with ID starting with b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9 not found: ID does not exist" containerID="b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9" Sep 29 13:49:32 crc kubenswrapper[4869]: I0929 13:49:32.738046 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9"} err="failed to get container status \"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9\": rpc error: code = NotFound desc = could not find container \"b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9\": container with ID starting with b4b71716e92d5e25f4777ef1a4b61f1c32e9c01f04d246987251ec5b65ec24d9 not found: ID does not exist" Sep 29 13:49:34 crc kubenswrapper[4869]: I0929 13:49:34.255913 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" path="/var/lib/kubelet/pods/a39dcbb0-84e5-458c-9a0a-6d3388f423df/volumes" Sep 29 13:50:20 crc kubenswrapper[4869]: I0929 13:50:20.658053 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:50:20 crc kubenswrapper[4869]: I0929 13:50:20.659090 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:50:50 crc kubenswrapper[4869]: I0929 13:50:50.657021 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 
29 13:50:50 crc kubenswrapper[4869]: I0929 13:50:50.657986 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:51:20 crc kubenswrapper[4869]: I0929 13:51:20.658366 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:51:20 crc kubenswrapper[4869]: I0929 13:51:20.659471 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:51:20 crc kubenswrapper[4869]: I0929 13:51:20.659554 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:51:20 crc kubenswrapper[4869]: I0929 13:51:20.660408 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 13:51:20 crc kubenswrapper[4869]: I0929 13:51:20.660474 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9" gracePeriod=600 Sep 29 13:51:21 crc kubenswrapper[4869]: I0929 13:51:21.391287 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9" exitCode=0 Sep 29 13:51:21 crc kubenswrapper[4869]: I0929 13:51:21.391372 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9"} Sep 29 13:51:21 crc kubenswrapper[4869]: I0929 13:51:21.392291 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6"} Sep 29 13:51:21 crc kubenswrapper[4869]: I0929 13:51:21.392326 4869 scope.go:117] "RemoveContainer" containerID="048cd43949b58ce18d03c7d0b2e620ec54f0534c634de4bd1b7f05b6daaee7df" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.974776 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mbljb"] Sep 29 13:52:32 crc kubenswrapper[4869]: E0929 13:52:32.977592 4869 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" containerName="registry" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.977746 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" containerName="registry" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.977962 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39dcbb0-84e5-458c-9a0a-6d3388f423df" containerName="registry" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.978600 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.981416 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.982876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-5cvhx" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.982878 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.984510 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mbrxl"] Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.985442 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-mbrxl" Sep 29 13:52:32 crc kubenswrapper[4869]: W0929 13:52:32.987080 4869 reflector.go:561] object-"cert-manager"/"cert-manager-dockercfg-t5w66": failed to list *v1.Secret: secrets "cert-manager-dockercfg-t5w66" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "cert-manager": no relationship found between node 'crc' and this object Sep 29 13:52:32 crc kubenswrapper[4869]: E0929 13:52:32.987191 4869 reflector.go:158] "Unhandled Error" err="object-\"cert-manager\"/\"cert-manager-dockercfg-t5w66\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-manager-dockercfg-t5w66\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"cert-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Sep 29 13:52:32 crc kubenswrapper[4869]: I0929 13:52:32.993790 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mbljb"] Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.006282 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-l5274"] Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.007310 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.010136 4869 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-s2z25" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.025323 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mbrxl"] Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.035957 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-l5274"] Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.129190 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzrxf\" (UniqueName: \"kubernetes.io/projected/a1343538-595b-4e8b-9c3a-dbb5abd2607d-kube-api-access-wzrxf\") pod \"cert-manager-webhook-5655c58dd6-l5274\" (UID: \"a1343538-595b-4e8b-9c3a-dbb5abd2607d\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.129278 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlr26\" (UniqueName: \"kubernetes.io/projected/bba4c7ae-ab90-4f36-b529-0fb96008204c-kube-api-access-rlr26\") pod \"cert-manager-cainjector-7f985d654d-mbljb\" (UID: \"bba4c7ae-ab90-4f36-b529-0fb96008204c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.130307 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4clz9\" (UniqueName: \"kubernetes.io/projected/d49de4d2-6830-45ee-adf5-57fa0cfc58ec-kube-api-access-4clz9\") pod \"cert-manager-5b446d88c5-mbrxl\" (UID: \"d49de4d2-6830-45ee-adf5-57fa0cfc58ec\") " pod="cert-manager/cert-manager-5b446d88c5-mbrxl" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.232019 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4clz9\" (UniqueName: \"kubernetes.io/projected/d49de4d2-6830-45ee-adf5-57fa0cfc58ec-kube-api-access-4clz9\") pod \"cert-manager-5b446d88c5-mbrxl\" (UID: \"d49de4d2-6830-45ee-adf5-57fa0cfc58ec\") " pod="cert-manager/cert-manager-5b446d88c5-mbrxl" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.232121 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzrxf\" (UniqueName: \"kubernetes.io/projected/a1343538-595b-4e8b-9c3a-dbb5abd2607d-kube-api-access-wzrxf\") pod \"cert-manager-webhook-5655c58dd6-l5274\" (UID: \"a1343538-595b-4e8b-9c3a-dbb5abd2607d\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.232165 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlr26\" (UniqueName: \"kubernetes.io/projected/bba4c7ae-ab90-4f36-b529-0fb96008204c-kube-api-access-rlr26\") pod \"cert-manager-cainjector-7f985d654d-mbljb\" (UID: \"bba4c7ae-ab90-4f36-b529-0fb96008204c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.257364 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlr26\" (UniqueName: \"kubernetes.io/projected/bba4c7ae-ab90-4f36-b529-0fb96008204c-kube-api-access-rlr26\") pod \"cert-manager-cainjector-7f985d654d-mbljb\" (UID: \"bba4c7ae-ab90-4f36-b529-0fb96008204c\") " 
pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.258751 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4clz9\" (UniqueName: \"kubernetes.io/projected/d49de4d2-6830-45ee-adf5-57fa0cfc58ec-kube-api-access-4clz9\") pod \"cert-manager-5b446d88c5-mbrxl\" (UID: \"d49de4d2-6830-45ee-adf5-57fa0cfc58ec\") " pod="cert-manager/cert-manager-5b446d88c5-mbrxl" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.266858 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzrxf\" (UniqueName: \"kubernetes.io/projected/a1343538-595b-4e8b-9c3a-dbb5abd2607d-kube-api-access-wzrxf\") pod \"cert-manager-webhook-5655c58dd6-l5274\" (UID: \"a1343538-595b-4e8b-9c3a-dbb5abd2607d\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.304679 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.338311 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.549805 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mbljb"] Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.562961 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.604149 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-l5274"] Sep 29 13:52:33 crc kubenswrapper[4869]: W0929 13:52:33.607425 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1343538_595b_4e8b_9c3a_dbb5abd2607d.slice/crio-e35b831968d00d05a13617f71dd3cd1402536421df48eea68c26db2d6ebd85b8 WatchSource:0}: Error finding container e35b831968d00d05a13617f71dd3cd1402536421df48eea68c26db2d6ebd85b8: Status 404 returned error can't find the container with id e35b831968d00d05a13617f71dd3cd1402536421df48eea68c26db2d6ebd85b8 Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.847036 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" event={"ID":"bba4c7ae-ab90-4f36-b529-0fb96008204c","Type":"ContainerStarted","Data":"9b5a098532fa8740c011aee12d5d1bef51bef459cd5acf1114dd8daf58f5fd0c"} Sep 29 13:52:33 crc kubenswrapper[4869]: I0929 13:52:33.848766 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" event={"ID":"a1343538-595b-4e8b-9c3a-dbb5abd2607d","Type":"ContainerStarted","Data":"e35b831968d00d05a13617f71dd3cd1402536421df48eea68c26db2d6ebd85b8"} Sep 29 13:52:34 crc kubenswrapper[4869]: I0929 13:52:34.142346 4869 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-t5w66" Sep 29 13:52:34 crc kubenswrapper[4869]: I0929 13:52:34.148364 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-mbrxl" Sep 29 13:52:34 crc kubenswrapper[4869]: I0929 13:52:34.357273 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mbrxl"] Sep 29 13:52:34 crc kubenswrapper[4869]: W0929 13:52:34.368452 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd49de4d2_6830_45ee_adf5_57fa0cfc58ec.slice/crio-2e32e492a57fb93aaa082109243ffd175de1f1ac73bc08433dd5758a86dd2b3c WatchSource:0}: Error finding container 2e32e492a57fb93aaa082109243ffd175de1f1ac73bc08433dd5758a86dd2b3c: Status 404 returned error can't find the container with id 2e32e492a57fb93aaa082109243ffd175de1f1ac73bc08433dd5758a86dd2b3c Sep 29 13:52:34 crc kubenswrapper[4869]: I0929 13:52:34.862683 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-mbrxl" event={"ID":"d49de4d2-6830-45ee-adf5-57fa0cfc58ec","Type":"ContainerStarted","Data":"2e32e492a57fb93aaa082109243ffd175de1f1ac73bc08433dd5758a86dd2b3c"} Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.887271 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" event={"ID":"bba4c7ae-ab90-4f36-b529-0fb96008204c","Type":"ContainerStarted","Data":"ef5146d4afaeb0a40fef93a0d8c8b76214577a207ec25be0d8668ccffb08fcde"} Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.889453 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" event={"ID":"a1343538-595b-4e8b-9c3a-dbb5abd2607d","Type":"ContainerStarted","Data":"eb7669757c40b4ef643d37d1dccce6b90d5cdbd69f87290f29f1700a639fbb3b"} Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.889656 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.892607 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-mbrxl" event={"ID":"d49de4d2-6830-45ee-adf5-57fa0cfc58ec","Type":"ContainerStarted","Data":"249a3c9e1e8d2ec412c12cf2c01e789aad78dc0e9fde3a52863c5588089fd861"} Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.908154 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-mbljb" podStartSLOduration=2.647979787 podStartE2EDuration="5.908129217s" podCreationTimestamp="2025-09-29 13:52:32 +0000 UTC" firstStartedPulling="2025-09-29 13:52:33.562643789 +0000 UTC m=+680.003288109" lastFinishedPulling="2025-09-29 13:52:36.822793219 +0000 UTC m=+683.263437539" observedRunningTime="2025-09-29 13:52:37.903690024 +0000 UTC m=+684.344334354" watchObservedRunningTime="2025-09-29 13:52:37.908129217 +0000 UTC m=+684.348773537" Sep 29 13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.928039 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" podStartSLOduration=2.7216237210000003 podStartE2EDuration="5.928007656s" podCreationTimestamp="2025-09-29 13:52:32 +0000 UTC" firstStartedPulling="2025-09-29 13:52:33.610051401 +0000 UTC m=+680.050695711" lastFinishedPulling="2025-09-29 13:52:36.816435336 +0000 UTC m=+683.257079646" observedRunningTime="2025-09-29 13:52:37.927067011 +0000 UTC m=+684.367711341" watchObservedRunningTime="2025-09-29 13:52:37.928007656 +0000 UTC m=+684.368651976" Sep 29 
13:52:37 crc kubenswrapper[4869]: I0929 13:52:37.948736 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-mbrxl" podStartSLOduration=2.600929064 podStartE2EDuration="5.948710415s" podCreationTimestamp="2025-09-29 13:52:32 +0000 UTC" firstStartedPulling="2025-09-29 13:52:34.373360574 +0000 UTC m=+680.814004894" lastFinishedPulling="2025-09-29 13:52:37.721141925 +0000 UTC m=+684.161786245" observedRunningTime="2025-09-29 13:52:37.945910443 +0000 UTC m=+684.386554773" watchObservedRunningTime="2025-09-29 13:52:37.948710415 +0000 UTC m=+684.389354735" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.343481 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-l5274" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.491858 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mx9tj"] Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.492908 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-controller" containerID="cri-o://aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.492982 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.493045 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-node" containerID="cri-o://8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.493091 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="northd" containerID="cri-o://dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.493184 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-acl-logging" containerID="cri-o://96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.493293 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="sbdb" containerID="cri-o://c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.492982 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="nbdb" containerID="cri-o://c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" gracePeriod=30 Sep 29 
13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.544000 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" containerID="cri-o://3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" gracePeriod=30 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.781372 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/3.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.783991 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovn-acl-logging/0.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.784665 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovn-controller/0.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.785175 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843111 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-x4gh8"] Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843385 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="sbdb" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843405 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="sbdb" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843418 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843425 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843433 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-node" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843440 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-node" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843453 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843460 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843472 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843479 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843488 4869 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843496 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843508 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="northd" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843514 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="northd" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843522 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-acl-logging" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843530 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-acl-logging" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843539 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-ovn-metrics" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843545 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-ovn-metrics" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843553 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="nbdb" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843559 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="nbdb" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843566 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kubecfg-setup" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843572 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kubecfg-setup" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843693 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843704 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="sbdb" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843714 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="kube-rbac-proxy-ovn-metrics" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843723 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-acl-logging" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843731 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843738 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843747 4869 
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843755 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="nbdb"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843764 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="northd"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843770 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovn-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843852 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843859 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.843869 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843874 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843977 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.843989 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerName="ovnkube-controller"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.845745 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885421 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885482 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885509 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885524 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885634 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885660 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885685 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885732 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885778 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btwpm\" (UniqueName: \"kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885830 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") "
\"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885858 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885875 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885895 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885964 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.885987 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886007 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886032 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886026 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket" (OuterVolumeSpecName: "log-socket") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886087 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886115 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886126 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886105 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886176 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886134 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886142 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886206 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886225 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886148 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886475 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet\") pod \"5d03c451-25ce-46f9-9a14-f2ee29a89521\" (UID: \"5d03c451-25ce-46f9-9a14-f2ee29a89521\") " Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886572 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886674 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886768 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886240 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886232 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886264 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log" (OuterVolumeSpecName: "node-log") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886280 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash" (OuterVolumeSpecName: "host-slash") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.886909 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887327 4869 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887357 4869 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887367 4869 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887377 4869 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-netd\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887390 4869 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-cni-bin\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887399 4869 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-log-socket\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887408 4869 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-env-overrides\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887420 4869 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887432 4869 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887442 4869 reconciler_common.go:293] "Volume detached for volume \"node-log\" 
(UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-node-log\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887449 4869 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887458 4869 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887467 4869 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-systemd-units\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887475 4869 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-slash\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887484 4869 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-run-netns\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887495 4869 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-ovn\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.887504 4869 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-host-kubelet\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.893474 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm" (OuterVolumeSpecName: "kube-api-access-btwpm") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "kube-api-access-btwpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.894156 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.908267 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "5d03c451-25ce-46f9-9a14-f2ee29a89521" (UID: "5d03c451-25ce-46f9-9a14-f2ee29a89521"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.938137 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/2.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.938537 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/1.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.938579 4869 generic.go:334] "Generic (PLEG): container finished" podID="0e924d34-8790-41e8-a11a-91a1d0c625ca" containerID="efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c" exitCode=2 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.938636 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerDied","Data":"efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.938684 4869 scope.go:117] "RemoveContainer" containerID="c782b435f99e7b2e86544203bb96137ac24c4a0b79011fdde1d55153ec6cc436" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.939222 4869 scope.go:117] "RemoveContainer" containerID="efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c" Sep 29 13:52:43 crc kubenswrapper[4869]: E0929 13:52:43.939401 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-vs8mc_openshift-multus(0e924d34-8790-41e8-a11a-91a1d0c625ca)\"" pod="openshift-multus/multus-vs8mc" podUID="0e924d34-8790-41e8-a11a-91a1d0c625ca" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.943700 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovnkube-controller/3.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.947629 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovn-acl-logging/0.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948288 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mx9tj_5d03c451-25ce-46f9-9a14-f2ee29a89521/ovn-controller/0.log" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948773 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" exitCode=0 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948810 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" exitCode=0 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948821 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" exitCode=0 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948830 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" exitCode=0 Sep 29 13:52:43 crc 
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948846 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" exitCode=0
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948825 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948927 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948942 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948853 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" exitCode=143
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948959 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948977 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948995 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949011 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949021 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"}
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949030 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"}
containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949039 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949047 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949054 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949062 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949069 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949077 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948944 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949087 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949662 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949725 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949790 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949874 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949935 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.949991 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950042 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950096 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950148 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950203 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950265 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950324 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950379 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950436 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950495 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950551 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950602 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950683 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950742 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950800 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950855 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.948961 4869 generic.go:334] "Generic (PLEG): container finished" podID="5d03c451-25ce-46f9-9a14-f2ee29a89521" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e" exitCode=143 Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.950985 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mx9tj" event={"ID":"5d03c451-25ce-46f9-9a14-f2ee29a89521","Type":"ContainerDied","Data":"b638d05b433da167ad79d9cca68b9d3ad7cb45b26dc5a45c5d2bfb2c9b1ed1f2"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951045 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951091 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951153 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951221 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951286 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951351 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951420 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951473 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951522 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.951587 4869 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.971274 4869 scope.go:117] "RemoveContainer" 
containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.982772 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mx9tj"] Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988484 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-log-socket\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988572 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-netd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988688 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-script-lib\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988740 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-config\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988782 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-bin\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988806 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-slash\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988887 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-systemd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988909 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.988999 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-run-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989045 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njfhv\" (UniqueName: \"kubernetes.io/projected/297e5ac9-6211-4790-a138-02147ceb503a-kube-api-access-njfhv\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989070 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989100 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-etc-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989124 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/297e5ac9-6211-4790-a138-02147ceb503a-ovn-node-metrics-cert\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989170 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-kubelet\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989207 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-ovn\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989227 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-var-lib-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
\"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989333 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-run-netns\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989361 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-systemd-units\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989381 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-node-log\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989496 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btwpm\" (UniqueName: \"kubernetes.io/projected/5d03c451-25ce-46f9-9a14-f2ee29a89521-kube-api-access-btwpm\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989518 4869 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d03c451-25ce-46f9-9a14-f2ee29a89521-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.989534 4869 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d03c451-25ce-46f9-9a14-f2ee29a89521-run-systemd\") on node \"crc\" DevicePath \"\"" Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.990637 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mx9tj"] Sep 29 13:52:43 crc kubenswrapper[4869]: I0929 13:52:43.991162 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.008801 4869 scope.go:117] "RemoveContainer" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.023413 4869 scope.go:117] "RemoveContainer" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.039375 4869 scope.go:117] "RemoveContainer" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.055825 4869 scope.go:117] "RemoveContainer" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.069727 4869 scope.go:117] "RemoveContainer" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.085561 4869 scope.go:117] "RemoveContainer" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" Sep 29 13:52:44 crc 
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090826 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-systemd-units\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090852 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-node-log\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090828 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-run-netns\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090882 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-log-socket\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090903 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-node-log\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090853 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-systemd-units\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090915 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-netd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090944 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-netd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090948 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-script-lib\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
\"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-script-lib\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.090980 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-config\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091004 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-bin\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091026 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-slash\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091054 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-systemd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091079 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091106 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-env-overrides\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091133 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-run-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091158 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njfhv\" (UniqueName: \"kubernetes.io/projected/297e5ac9-6211-4790-a138-02147ceb503a-kube-api-access-njfhv\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091182 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091211 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-etc-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091233 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/297e5ac9-6211-4790-a138-02147ceb503a-ovn-node-metrics-cert\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091287 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-kubelet\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091312 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-ovn\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091336 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-var-lib-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091652 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-script-lib\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091688 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091703 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-ovn\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091728 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-var-lib-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: 
\"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091722 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-slash\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091730 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-kubelet\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091876 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091922 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-cni-bin\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091766 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-etc-openvswitch\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.091845 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-host-run-ovn-kubernetes\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.092034 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-run-systemd\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.092116 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/297e5ac9-6211-4790-a138-02147ceb503a-log-socket\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.093150 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-ovnkube-config\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:44 crc 
kubenswrapper[4869]: I0929 13:52:44.093262 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/297e5ac9-6211-4790-a138-02147ceb503a-env-overrides\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.097635 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/297e5ac9-6211-4790-a138-02147ceb503a-ovn-node-metrics-cert\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.104739 4869 scope.go:117] "RemoveContainer" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.110384 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njfhv\" (UniqueName: \"kubernetes.io/projected/297e5ac9-6211-4790-a138-02147ceb503a-kube-api-access-njfhv\") pod \"ovnkube-node-x4gh8\" (UID: \"297e5ac9-6211-4790-a138-02147ceb503a\") " pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.120946 4869 scope.go:117] "RemoveContainer" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.135517 4869 scope.go:117] "RemoveContainer" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"
Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.136028 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": container with ID starting with 3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e not found: ID does not exist" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136086 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} err="failed to get container status \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": rpc error: code = NotFound desc = could not find container \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": container with ID starting with 3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136136 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"
Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.136453 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": container with ID starting with 786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc not found: ID does not exist" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136484 4869 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} err="failed to get container status \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": rpc error: code = NotFound desc = could not find container \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": container with ID starting with 786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136502 4869 scope.go:117] "RemoveContainer" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.136772 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": container with ID starting with c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8 not found: ID does not exist" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136807 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} err="failed to get container status \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": rpc error: code = NotFound desc = could not find container \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": container with ID starting with c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.136825 4869 scope.go:117] "RemoveContainer" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.137086 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": container with ID starting with c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf not found: ID does not exist" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137126 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} err="failed to get container status \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": rpc error: code = NotFound desc = could not find container \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": container with ID starting with c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137155 4869 scope.go:117] "RemoveContainer" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.137443 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": container with ID starting with dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2 not found: ID does not exist" 
containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137476 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} err="failed to get container status \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": rpc error: code = NotFound desc = could not find container \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": container with ID starting with dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137495 4869 scope.go:117] "RemoveContainer" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.137828 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": container with ID starting with 2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4 not found: ID does not exist" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137854 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} err="failed to get container status \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": rpc error: code = NotFound desc = could not find container \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": container with ID starting with 2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.137871 4869 scope.go:117] "RemoveContainer" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.138113 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": container with ID starting with 8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d not found: ID does not exist" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138136 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} err="failed to get container status \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": rpc error: code = NotFound desc = could not find container \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": container with ID starting with 8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138153 4869 scope.go:117] "RemoveContainer" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.138465 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": container with ID starting with 96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436 not found: ID does not exist" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138501 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} err="failed to get container status \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": rpc error: code = NotFound desc = could not find container \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": container with ID starting with 96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138531 4869 scope.go:117] "RemoveContainer" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.138886 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": container with ID starting with aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e not found: ID does not exist" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138915 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} err="failed to get container status \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": rpc error: code = NotFound desc = could not find container \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": container with ID starting with aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.138936 4869 scope.go:117] "RemoveContainer" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b" Sep 29 13:52:44 crc kubenswrapper[4869]: E0929 13:52:44.139273 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": container with ID starting with e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b not found: ID does not exist" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.139308 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} err="failed to get container status \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": rpc error: code = NotFound desc = could not find container \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": container with ID starting with e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.139327 4869 scope.go:117] "RemoveContainer" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" Sep 29 13:52:44 crc 
kubenswrapper[4869]: I0929 13:52:44.139652 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} err="failed to get container status \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": rpc error: code = NotFound desc = could not find container \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": container with ID starting with 3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.139684 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.139944 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} err="failed to get container status \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": rpc error: code = NotFound desc = could not find container \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": container with ID starting with 786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.139966 4869 scope.go:117] "RemoveContainer" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140214 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} err="failed to get container status \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": rpc error: code = NotFound desc = could not find container \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": container with ID starting with c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140238 4869 scope.go:117] "RemoveContainer" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140496 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} err="failed to get container status \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": rpc error: code = NotFound desc = could not find container \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": container with ID starting with c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140524 4869 scope.go:117] "RemoveContainer" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140896 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} err="failed to get container status \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": rpc error: code = NotFound desc = could not find container \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": container with ID 
starting with dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.140922 4869 scope.go:117] "RemoveContainer" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141213 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} err="failed to get container status \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": rpc error: code = NotFound desc = could not find container \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": container with ID starting with 2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141245 4869 scope.go:117] "RemoveContainer" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141520 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} err="failed to get container status \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": rpc error: code = NotFound desc = could not find container \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": container with ID starting with 8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141548 4869 scope.go:117] "RemoveContainer" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141874 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} err="failed to get container status \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": rpc error: code = NotFound desc = could not find container \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": container with ID starting with 96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.141907 4869 scope.go:117] "RemoveContainer" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142180 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} err="failed to get container status \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": rpc error: code = NotFound desc = could not find container \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": container with ID starting with aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142207 4869 scope.go:117] "RemoveContainer" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142531 4869 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} err="failed to get container status \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": rpc error: code = NotFound desc = could not find container \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": container with ID starting with e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142567 4869 scope.go:117] "RemoveContainer" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142903 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} err="failed to get container status \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": rpc error: code = NotFound desc = could not find container \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": container with ID starting with 3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.142930 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.143278 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} err="failed to get container status \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": rpc error: code = NotFound desc = could not find container \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": container with ID starting with 786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.143337 4869 scope.go:117] "RemoveContainer" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.143794 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} err="failed to get container status \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": rpc error: code = NotFound desc = could not find container \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": container with ID starting with c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.143817 4869 scope.go:117] "RemoveContainer" containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144068 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} err="failed to get container status \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": rpc error: code = NotFound desc = could not find container \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": container with ID starting with c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf not found: ID does not exist" Sep 
29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144087 4869 scope.go:117] "RemoveContainer" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144359 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} err="failed to get container status \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": rpc error: code = NotFound desc = could not find container \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": container with ID starting with dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144394 4869 scope.go:117] "RemoveContainer" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144681 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} err="failed to get container status \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": rpc error: code = NotFound desc = could not find container \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": container with ID starting with 2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144705 4869 scope.go:117] "RemoveContainer" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144910 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} err="failed to get container status \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": rpc error: code = NotFound desc = could not find container \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": container with ID starting with 8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.144930 4869 scope.go:117] "RemoveContainer" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145135 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} err="failed to get container status \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": rpc error: code = NotFound desc = could not find container \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": container with ID starting with 96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436 not found: ID does not exist"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145153 4869 scope.go:117] "RemoveContainer" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145351 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} err="failed to get container status 
\"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": rpc error: code = NotFound desc = could not find container \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": container with ID starting with aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145368 4869 scope.go:117] "RemoveContainer" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145578 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} err="failed to get container status \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": rpc error: code = NotFound desc = could not find container \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": container with ID starting with e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145593 4869 scope.go:117] "RemoveContainer" containerID="3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145830 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e"} err="failed to get container status \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": rpc error: code = NotFound desc = could not find container \"3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e\": container with ID starting with 3fc0a614fbbe91c09bfc6f0a81a44c07f87ee216a760efb1eb3970a5c9aecb9e not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.145848 4869 scope.go:117] "RemoveContainer" containerID="786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146053 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc"} err="failed to get container status \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": rpc error: code = NotFound desc = could not find container \"786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc\": container with ID starting with 786525b43a52d54d8d7d8536f4d3ec8343664aa12f552312891a58c40bc44fdc not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146074 4869 scope.go:117] "RemoveContainer" containerID="c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146263 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8"} err="failed to get container status \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": rpc error: code = NotFound desc = could not find container \"c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8\": container with ID starting with c30abecd91048ebc42f48c4905ae2b5112caf9d04c3c189883318b7ba20ee4d8 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146280 4869 scope.go:117] "RemoveContainer" 
containerID="c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146467 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf"} err="failed to get container status \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": rpc error: code = NotFound desc = could not find container \"c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf\": container with ID starting with c139eaf531254b52212b1ca21f75ee8c94025be7fdebce13dea1bd10976f22bf not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146484 4869 scope.go:117] "RemoveContainer" containerID="dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146690 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2"} err="failed to get container status \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": rpc error: code = NotFound desc = could not find container \"dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2\": container with ID starting with dae7e42283ce0711b845d136e292e3abdee9c64638f821411ee35ed419e87cd2 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146708 4869 scope.go:117] "RemoveContainer" containerID="2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146898 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4"} err="failed to get container status \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": rpc error: code = NotFound desc = could not find container \"2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4\": container with ID starting with 2ff03eb4d9372908d1918f7af4cd0851a43a298679d766800ea1049b31bcbde4 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.146912 4869 scope.go:117] "RemoveContainer" containerID="8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147102 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d"} err="failed to get container status \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": rpc error: code = NotFound desc = could not find container \"8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d\": container with ID starting with 8b033ffced377d6de0a7bea9db6a45d0a74a2f87519cf29d162b31c04b07352d not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147116 4869 scope.go:117] "RemoveContainer" containerID="96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147302 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436"} err="failed to get container status \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": rpc error: code = NotFound desc = could not find 
container \"96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436\": container with ID starting with 96a8e7d23dbbb7f01692fb4acfcf1de31a9d6a8bab20dc497da7f98748078436 not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147317 4869 scope.go:117] "RemoveContainer" containerID="aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147501 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e"} err="failed to get container status \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": rpc error: code = NotFound desc = could not find container \"aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e\": container with ID starting with aad970bb59a93a866b27fb271c4f0fdef9d494f94fafad76a5e27db11c81bd2e not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147514 4869 scope.go:117] "RemoveContainer" containerID="e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.147714 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b"} err="failed to get container status \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": rpc error: code = NotFound desc = could not find container \"e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b\": container with ID starting with e87b9f1d14e4cde9102baaa09dc32f01c852d8ed64ce510d5f991c8f00b5667b not found: ID does not exist" Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.172876 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.255989 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d03c451-25ce-46f9-9a14-f2ee29a89521" path="/var/lib/kubelet/pods/5d03c451-25ce-46f9-9a14-f2ee29a89521/volumes"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.962226 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/2.log"
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.965789 4869 generic.go:334] "Generic (PLEG): container finished" podID="297e5ac9-6211-4790-a138-02147ceb503a" containerID="86cfb6ca5d149b9fd9d3fdbb87786714e458fa87fa3face03c5620214935444f" exitCode=0
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.965857 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerDied","Data":"86cfb6ca5d149b9fd9d3fdbb87786714e458fa87fa3face03c5620214935444f"}
Sep 29 13:52:44 crc kubenswrapper[4869]: I0929 13:52:44.965921 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"8d6411ae44462fffa06281185dbcccc80a7de66cc64f9f6d9810110604956629"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.977580 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"c457477553d274c5bc29d69d75151b9c9c8da1b34723cb41dd484f1b7b548d33"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.978436 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"bd1b8eff87afb6b31e70f5284c287dcac09b42e05590d2bb02478270250a3c80"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.978450 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"6ea1d4d91d7592655fc535cde1e2f2b85a4e11f498f53bae9981feac29ab0899"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.978461 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"f682c5715439caa4fb67e4fe06c4459618abc0a27c17d57f3bd1188fa80683a0"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.978470 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"7e081ab7aa7517852f2fd0758c7b296cab80e33339abb727004fb41cbc5ad63f"}
Sep 29 13:52:45 crc kubenswrapper[4869]: I0929 13:52:45.978482 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"27d121c82e9b067bc499df7fd4218542ac80e78fb2d7d283f6ed95b3f2b23c10"}
Sep 29 13:52:47 crc kubenswrapper[4869]: I0929 13:52:47.999589 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" 
event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"5d32a9dc44d31680873e25328d00675c4a7c699e076b24c6e716ff23e22428ed"} Sep 29 13:52:51 crc kubenswrapper[4869]: I0929 13:52:51.023036 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" event={"ID":"297e5ac9-6211-4790-a138-02147ceb503a","Type":"ContainerStarted","Data":"fcafaac926e03cc402d78d6ce9139eb4d0ddf91c3a04f8b4c06687d3d88b3fd3"} Sep 29 13:52:51 crc kubenswrapper[4869]: I0929 13:52:51.024053 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:51 crc kubenswrapper[4869]: I0929 13:52:51.056733 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:51 crc kubenswrapper[4869]: I0929 13:52:51.066430 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" podStartSLOduration=8.066404127 podStartE2EDuration="8.066404127s" podCreationTimestamp="2025-09-29 13:52:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:52:51.059949442 +0000 UTC m=+697.500593812" watchObservedRunningTime="2025-09-29 13:52:51.066404127 +0000 UTC m=+697.507048467" Sep 29 13:52:52 crc kubenswrapper[4869]: I0929 13:52:52.029389 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:52 crc kubenswrapper[4869]: I0929 13:52:52.029800 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:52 crc kubenswrapper[4869]: I0929 13:52:52.058321 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:52:58 crc kubenswrapper[4869]: I0929 13:52:58.241679 4869 scope.go:117] "RemoveContainer" containerID="efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c" Sep 29 13:52:58 crc kubenswrapper[4869]: E0929 13:52:58.241980 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-vs8mc_openshift-multus(0e924d34-8790-41e8-a11a-91a1d0c625ca)\"" pod="openshift-multus/multus-vs8mc" podUID="0e924d34-8790-41e8-a11a-91a1d0c625ca" Sep 29 13:53:10 crc kubenswrapper[4869]: I0929 13:53:10.243059 4869 scope.go:117] "RemoveContainer" containerID="efaa387899245c600fed4d298ba5d28dcdcd5f98768bb43a8cd8c078362e1d2c" Sep 29 13:53:11 crc kubenswrapper[4869]: I0929 13:53:11.149851 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vs8mc_0e924d34-8790-41e8-a11a-91a1d0c625ca/kube-multus/2.log" Sep 29 13:53:11 crc kubenswrapper[4869]: I0929 13:53:11.150301 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vs8mc" event={"ID":"0e924d34-8790-41e8-a11a-91a1d0c625ca","Type":"ContainerStarted","Data":"a85f1675086fafcfdd10147d9ca81b5ecf577886255063dd78ece381d55bac22"} Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.404186 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"] Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.405563 4869 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.407310 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.414867 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"]
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.418956 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5brd\" (UniqueName: \"kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.419240 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.419363 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.522648 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.522722 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.522774 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5brd\" (UniqueName: \"kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"
Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.523713 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.529781 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.547109 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5brd\" (UniqueName: \"kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.727441 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:12 crc kubenswrapper[4869]: I0929 13:53:12.917743 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz"] Sep 29 13:53:13 crc kubenswrapper[4869]: I0929 13:53:13.162217 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerStarted","Data":"3a618d38f1a0d0d59beb7639d1b51d21ca03bd8c63f56b9021aea8b30c3f246f"} Sep 29 13:53:13 crc kubenswrapper[4869]: I0929 13:53:13.162345 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerStarted","Data":"65f01f30c12fa35e056a22fc8f570c1d997603d36980da2b4ee5e5ee1f3e6771"} Sep 29 13:53:14 crc kubenswrapper[4869]: I0929 13:53:14.169238 4869 generic.go:334] "Generic (PLEG): container finished" podID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerID="3a618d38f1a0d0d59beb7639d1b51d21ca03bd8c63f56b9021aea8b30c3f246f" exitCode=0 Sep 29 13:53:14 crc kubenswrapper[4869]: I0929 13:53:14.169305 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerDied","Data":"3a618d38f1a0d0d59beb7639d1b51d21ca03bd8c63f56b9021aea8b30c3f246f"} Sep 29 13:53:14 crc kubenswrapper[4869]: I0929 13:53:14.204962 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x4gh8" Sep 29 13:53:16 crc kubenswrapper[4869]: I0929 13:53:16.185994 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerStarted","Data":"5c5347fdff923570b19cee3b21c006f6f45b2f4e3f32c72801998800cf8f26de"} Sep 29 13:53:18 
crc kubenswrapper[4869]: I0929 13:53:18.200777 4869 generic.go:334] "Generic (PLEG): container finished" podID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerID="5c5347fdff923570b19cee3b21c006f6f45b2f4e3f32c72801998800cf8f26de" exitCode=0 Sep 29 13:53:18 crc kubenswrapper[4869]: I0929 13:53:18.200838 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerDied","Data":"5c5347fdff923570b19cee3b21c006f6f45b2f4e3f32c72801998800cf8f26de"} Sep 29 13:53:19 crc kubenswrapper[4869]: I0929 13:53:19.212703 4869 generic.go:334] "Generic (PLEG): container finished" podID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerID="f96dca02fc9477084db83ec1065c30f52c016ef4cdf3b42b81367032a13f2a82" exitCode=0 Sep 29 13:53:19 crc kubenswrapper[4869]: I0929 13:53:19.212787 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerDied","Data":"f96dca02fc9477084db83ec1065c30f52c016ef4cdf3b42b81367032a13f2a82"} Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.466516 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.629586 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle\") pod \"64a7ea48-cd6e-4103-bb96-ab537c59c710\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.630048 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util\") pod \"64a7ea48-cd6e-4103-bb96-ab537c59c710\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.630114 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5brd\" (UniqueName: \"kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd\") pod \"64a7ea48-cd6e-4103-bb96-ab537c59c710\" (UID: \"64a7ea48-cd6e-4103-bb96-ab537c59c710\") " Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.632392 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle" (OuterVolumeSpecName: "bundle") pod "64a7ea48-cd6e-4103-bb96-ab537c59c710" (UID: "64a7ea48-cd6e-4103-bb96-ab537c59c710"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.633306 4869 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.636490 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd" (OuterVolumeSpecName: "kube-api-access-k5brd") pod "64a7ea48-cd6e-4103-bb96-ab537c59c710" (UID: "64a7ea48-cd6e-4103-bb96-ab537c59c710"). 
InnerVolumeSpecName "kube-api-access-k5brd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.652158 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util" (OuterVolumeSpecName: "util") pod "64a7ea48-cd6e-4103-bb96-ab537c59c710" (UID: "64a7ea48-cd6e-4103-bb96-ab537c59c710"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.657217 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.657283 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.735213 4869 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/64a7ea48-cd6e-4103-bb96-ab537c59c710-util\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:20 crc kubenswrapper[4869]: I0929 13:53:20.735263 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5brd\" (UniqueName: \"kubernetes.io/projected/64a7ea48-cd6e-4103-bb96-ab537c59c710-kube-api-access-k5brd\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:21 crc kubenswrapper[4869]: I0929 13:53:21.226667 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" event={"ID":"64a7ea48-cd6e-4103-bb96-ab537c59c710","Type":"ContainerDied","Data":"65f01f30c12fa35e056a22fc8f570c1d997603d36980da2b4ee5e5ee1f3e6771"} Sep 29 13:53:21 crc kubenswrapper[4869]: I0929 13:53:21.226724 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65f01f30c12fa35e056a22fc8f570c1d997603d36980da2b4ee5e5ee1f3e6771" Sep 29 13:53:21 crc kubenswrapper[4869]: I0929 13:53:21.226837 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.514030 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v"] Sep 29 13:53:29 crc kubenswrapper[4869]: E0929 13:53:29.514807 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="extract" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.514825 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="extract" Sep 29 13:53:29 crc kubenswrapper[4869]: E0929 13:53:29.514836 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="pull" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.514842 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="pull" Sep 29 13:53:29 crc kubenswrapper[4869]: E0929 13:53:29.514859 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="util" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.514864 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="util" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.515001 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="64a7ea48-cd6e-4103-bb96-ab537c59c710" containerName="extract" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.515508 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.517892 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-674lg" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.518199 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.519222 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.526262 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.644004 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.645001 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:29 crc kubenswrapper[4869]: W0929 13:53:29.647186 4869 reflector.go:561] object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-wn5bn": failed to list *v1.Secret: secrets "obo-prometheus-operator-admission-webhook-dockercfg-wn5bn" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Sep 29 13:53:29 crc kubenswrapper[4869]: W0929 13:53:29.647223 4869 reflector.go:561] object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert": failed to list *v1.Secret: secrets "obo-prometheus-operator-admission-webhook-service-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Sep 29 13:53:29 crc kubenswrapper[4869]: E0929 13:53:29.647300 4869 reflector.go:158] "Unhandled Error" err="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-service-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"obo-prometheus-operator-admission-webhook-service-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Sep 29 13:53:29 crc kubenswrapper[4869]: E0929 13:53:29.647243 4869 reflector.go:158] "Unhandled Error" err="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-dockercfg-wn5bn\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"obo-prometheus-operator-admission-webhook-dockercfg-wn5bn\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.654459 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvtw9\" (UniqueName: \"kubernetes.io/projected/bfd220db-47be-491b-bfe7-5962c10c099b-kube-api-access-kvtw9\") pod \"obo-prometheus-operator-7c8cf85677-7gl6v\" (UID: \"bfd220db-47be-491b-bfe7-5962c10c099b\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.659265 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.660380 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.673400 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.681739 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.755787 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvtw9\" (UniqueName: \"kubernetes.io/projected/bfd220db-47be-491b-bfe7-5962c10c099b-kube-api-access-kvtw9\") pod \"obo-prometheus-operator-7c8cf85677-7gl6v\" (UID: \"bfd220db-47be-491b-bfe7-5962c10c099b\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.756175 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.756327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.756477 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.756599 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.784790 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvtw9\" (UniqueName: \"kubernetes.io/projected/bfd220db-47be-491b-bfe7-5962c10c099b-kube-api-access-kvtw9\") pod \"obo-prometheus-operator-7c8cf85677-7gl6v\" (UID: \"bfd220db-47be-491b-bfe7-5962c10c099b\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.834164 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.842093 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-527r2"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.842837 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.845374 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-sj6cl" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.845689 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.858120 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.858229 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.858262 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.858319 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.866487 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-527r2"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.959977 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.960058 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4xmp\" (UniqueName: 
\"kubernetes.io/projected/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-kube-api-access-q4xmp\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.974377 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-n98tr"] Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.975587 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.987488 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-5nmpd" Sep 29 13:53:29 crc kubenswrapper[4869]: I0929 13:53:29.991728 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-n98tr"] Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.061143 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.061236 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4xmp\" (UniqueName: \"kubernetes.io/projected/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-kube-api-access-q4xmp\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.061282 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg7hg\" (UniqueName: \"kubernetes.io/projected/553217bb-4f98-4170-94e2-c809a866c927-kube-api-access-pg7hg\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.061360 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/553217bb-4f98-4170-94e2-c809a866c927-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.079940 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4xmp\" (UniqueName: \"kubernetes.io/projected/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-kube-api-access-q4xmp\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.096583 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-527r2\" (UID: \"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad\") " 
pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.142070 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v"] Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.163198 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg7hg\" (UniqueName: \"kubernetes.io/projected/553217bb-4f98-4170-94e2-c809a866c927-kube-api-access-pg7hg\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.163295 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/553217bb-4f98-4170-94e2-c809a866c927-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.164066 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/553217bb-4f98-4170-94e2-c809a866c927-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.192412 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg7hg\" (UniqueName: \"kubernetes.io/projected/553217bb-4f98-4170-94e2-c809a866c927-kube-api-access-pg7hg\") pod \"perses-operator-54bc95c9fb-n98tr\" (UID: \"553217bb-4f98-4170-94e2-c809a866c927\") " pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.207449 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.324137 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.329103 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" event={"ID":"bfd220db-47be-491b-bfe7-5962c10c099b","Type":"ContainerStarted","Data":"63b8d9f7c7266a624fc0b1ad63289b387f3f0e3a25aa848b152fef18d11560bf"} Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.648164 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-527r2"] Sep 29 13:53:30 crc kubenswrapper[4869]: W0929 13:53:30.651387 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6edcb3e2_2dbc_4d9c_a762_13d7cf4fdcad.slice/crio-c0513916e677de1db1391c45f8d8473b1c5f5e9e4414c9d53528dc6572a00be8 WatchSource:0}: Error finding container c0513916e677de1db1391c45f8d8473b1c5f5e9e4414c9d53528dc6572a00be8: Status 404 returned error can't find the container with id c0513916e677de1db1391c45f8d8473b1c5f5e9e4414c9d53528dc6572a00be8 Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.755254 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.765835 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.765925 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/81b26c9f-a443-43c7-bebe-4f5d9b5261b5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t\" (UID: \"81b26c9f-a443-43c7-bebe-4f5d9b5261b5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.769009 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.769110 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5d741f06-f7fa-4727-bf0b-4047c52949de-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft\" (UID: \"5d741f06-f7fa-4727-bf0b-4047c52949de\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:30 crc kubenswrapper[4869]: I0929 13:53:30.921435 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-n98tr"] Sep 29 13:53:30 crc kubenswrapper[4869]: W0929 13:53:30.929313 4869 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod553217bb_4f98_4170_94e2_c809a866c927.slice/crio-25d1a022d092e94888780888e09d41c6e954ce17fec6a45fd5a53c7314c90d20 WatchSource:0}: Error finding container 25d1a022d092e94888780888e09d41c6e954ce17fec6a45fd5a53c7314c90d20: Status 404 returned error can't find the container with id 25d1a022d092e94888780888e09d41c6e954ce17fec6a45fd5a53c7314c90d20 Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.205712 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-wn5bn" Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.207458 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.210541 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.373539 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" event={"ID":"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad","Type":"ContainerStarted","Data":"c0513916e677de1db1391c45f8d8473b1c5f5e9e4414c9d53528dc6572a00be8"} Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.388728 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" event={"ID":"553217bb-4f98-4170-94e2-c809a866c927","Type":"ContainerStarted","Data":"25d1a022d092e94888780888e09d41c6e954ce17fec6a45fd5a53c7314c90d20"} Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.642951 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft"] Sep 29 13:53:31 crc kubenswrapper[4869]: W0929 13:53:31.688453 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d741f06_f7fa_4727_bf0b_4047c52949de.slice/crio-cf768dbc417cdca2f4003bce9e3bc1a56a1f38a14576cecef6eda69983403c92 WatchSource:0}: Error finding container cf768dbc417cdca2f4003bce9e3bc1a56a1f38a14576cecef6eda69983403c92: Status 404 returned error can't find the container with id cf768dbc417cdca2f4003bce9e3bc1a56a1f38a14576cecef6eda69983403c92 Sep 29 13:53:31 crc kubenswrapper[4869]: I0929 13:53:31.887095 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t"] Sep 29 13:53:32 crc kubenswrapper[4869]: I0929 13:53:32.407431 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" event={"ID":"5d741f06-f7fa-4727-bf0b-4047c52949de","Type":"ContainerStarted","Data":"cf768dbc417cdca2f4003bce9e3bc1a56a1f38a14576cecef6eda69983403c92"} Sep 29 13:53:32 crc kubenswrapper[4869]: I0929 13:53:32.409113 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" event={"ID":"81b26c9f-a443-43c7-bebe-4f5d9b5261b5","Type":"ContainerStarted","Data":"f87eea0d901208d68cf4ab13b5234bf36eefa6e8add802ffb5d8daf3f98ec34b"} Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.110298 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"] Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.111305 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" containerID="cri-o://45fb2495ec39c8ff8a11c7bec4263955f46efd4c6c3c3791d06bb77514f764fc" gracePeriod=30 Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.123338 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"] Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.123629 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" containerID="cri-o://bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65" gracePeriod=30 Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.493651 4869 generic.go:334] "Generic (PLEG): container finished" podID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerID="bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65" exitCode=0 Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.493861 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" event={"ID":"d633942f-d083-44fd-b0e0-1dce9b0fdf0b","Type":"ContainerDied","Data":"bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65"} Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.498557 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerID="45fb2495ec39c8ff8a11c7bec4263955f46efd4c6c3c3791d06bb77514f764fc" exitCode=0 Sep 29 13:53:37 crc kubenswrapper[4869]: I0929 13:53:37.498603 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" event={"ID":"e6c3c3e6-62ba-4301-bd4a-f5cafd385463","Type":"ContainerDied","Data":"45fb2495ec39c8ff8a11c7bec4263955f46efd4c6c3c3791d06bb77514f764fc"} Sep 29 13:53:45 crc kubenswrapper[4869]: I0929 13:53:45.240724 4869 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-vhbp8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Sep 29 13:53:45 crc kubenswrapper[4869]: I0929 13:53:45.241364 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Sep 29 13:53:46 crc kubenswrapper[4869]: I0929 13:53:46.016169 4869 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-7467k container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" 
start-of-body= Sep 29 13:53:46 crc kubenswrapper[4869]: I0929 13:53:46.016699 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Sep 29 13:53:49 crc kubenswrapper[4869]: E0929 13:53:49.020048 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:e54c1e1301be66933f3ecb01d5a0ca27f58aabfd905b18b7d057bbf23bdb7b0d" Sep 29 13:53:49 crc kubenswrapper[4869]: E0929 13:53:49.020567 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:e54c1e1301be66933f3ecb01d5a0ca27f58aabfd905b18b7d057bbf23bdb7b0d,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.2.2,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft_openshift-operators(5d741f06-f7fa-4727-bf0b-4047c52949de): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Sep 29 13:53:49 crc kubenswrapper[4869]: E0929 13:53:49.022191 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" podUID="5d741f06-f7fa-4727-bf0b-4047c52949de" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.112471 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.114646 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.197723 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm"] Sep 29 13:53:49 crc kubenswrapper[4869]: E0929 13:53:49.199473 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.199497 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: E0929 13:53:49.199522 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.199529 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.199679 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" containerName="route-controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.199691 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" containerName="controller-manager" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.200260 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.219989 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm"] Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225412 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert\") pod \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225497 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles\") pod \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225545 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca\") pod \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225597 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config\") pod \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225718 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert\") pod \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225746 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca\") pod \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225772 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptmh5\" (UniqueName: \"kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5\") pod \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\" (UID: \"d633942f-d083-44fd-b0e0-1dce9b0fdf0b\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225798 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbv4m\" (UniqueName: \"kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m\") pod \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225820 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config\") pod \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\" (UID: \"e6c3c3e6-62ba-4301-bd4a-f5cafd385463\") " Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.225969 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-client-ca\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.226025 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf26442c-81b8-4f2b-a23a-d742f9c65d93-serving-cert\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.226041 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plztj\" (UniqueName: \"kubernetes.io/projected/cf26442c-81b8-4f2b-a23a-d742f9c65d93-kube-api-access-plztj\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.226065 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-config\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.226102 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-proxy-ca-bundles\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.228249 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca" (OuterVolumeSpecName: "client-ca") pod "d633942f-d083-44fd-b0e0-1dce9b0fdf0b" (UID: "d633942f-d083-44fd-b0e0-1dce9b0fdf0b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.228303 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config" (OuterVolumeSpecName: "config") pod "e6c3c3e6-62ba-4301-bd4a-f5cafd385463" (UID: "e6c3c3e6-62ba-4301-bd4a-f5cafd385463"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.228346 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca" (OuterVolumeSpecName: "client-ca") pod "e6c3c3e6-62ba-4301-bd4a-f5cafd385463" (UID: "e6c3c3e6-62ba-4301-bd4a-f5cafd385463"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.228752 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e6c3c3e6-62ba-4301-bd4a-f5cafd385463" (UID: "e6c3c3e6-62ba-4301-bd4a-f5cafd385463"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.228840 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config" (OuterVolumeSpecName: "config") pod "d633942f-d083-44fd-b0e0-1dce9b0fdf0b" (UID: "d633942f-d083-44fd-b0e0-1dce9b0fdf0b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.235430 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e6c3c3e6-62ba-4301-bd4a-f5cafd385463" (UID: "e6c3c3e6-62ba-4301-bd4a-f5cafd385463"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.239660 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5" (OuterVolumeSpecName: "kube-api-access-ptmh5") pod "d633942f-d083-44fd-b0e0-1dce9b0fdf0b" (UID: "d633942f-d083-44fd-b0e0-1dce9b0fdf0b"). InnerVolumeSpecName "kube-api-access-ptmh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.240904 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d633942f-d083-44fd-b0e0-1dce9b0fdf0b" (UID: "d633942f-d083-44fd-b0e0-1dce9b0fdf0b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.245473 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m" (OuterVolumeSpecName: "kube-api-access-zbv4m") pod "e6c3c3e6-62ba-4301-bd4a-f5cafd385463" (UID: "e6c3c3e6-62ba-4301-bd4a-f5cafd385463"). InnerVolumeSpecName "kube-api-access-zbv4m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327529 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-proxy-ca-bundles\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327664 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-client-ca\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327723 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf26442c-81b8-4f2b-a23a-d742f9c65d93-serving-cert\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327756 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plztj\" (UniqueName: \"kubernetes.io/projected/cf26442c-81b8-4f2b-a23a-d742f9c65d93-kube-api-access-plztj\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327793 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-config\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327846 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327864 4869 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327881 4869 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-client-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327896 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327910 4869 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-serving-cert\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327921 4869 reconciler_common.go:293] 
"Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-client-ca\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327933 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptmh5\" (UniqueName: \"kubernetes.io/projected/d633942f-d083-44fd-b0e0-1dce9b0fdf0b-kube-api-access-ptmh5\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327945 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbv4m\" (UniqueName: \"kubernetes.io/projected/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-kube-api-access-zbv4m\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.327957 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6c3c3e6-62ba-4301-bd4a-f5cafd385463-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.329537 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-client-ca\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.329829 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-config\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.331686 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cf26442c-81b8-4f2b-a23a-d742f9c65d93-proxy-ca-bundles\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.344740 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf26442c-81b8-4f2b-a23a-d742f9c65d93-serving-cert\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.354294 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plztj\" (UniqueName: \"kubernetes.io/projected/cf26442c-81b8-4f2b-a23a-d742f9c65d93-kube-api-access-plztj\") pod \"controller-manager-5f46fb56f9-9ltmm\" (UID: \"cf26442c-81b8-4f2b-a23a-d742f9c65d93\") " pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.610045 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.610063 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k" event={"ID":"d633942f-d083-44fd-b0e0-1dce9b0fdf0b","Type":"ContainerDied","Data":"c51b829e1b6fd72dae19c12b7eb95cb8e99daf72ea7575e7955bf6935a8d620c"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.610138 4869 scope.go:117] "RemoveContainer" containerID="bac57fe304365ef754f0dc2b7bddb939c8a94e0a4b654fa59fd73460f8cb9f65" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.612362 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" event={"ID":"553217bb-4f98-4170-94e2-c809a866c927","Type":"ContainerStarted","Data":"e32aa41bcd1db55d535ab4ece043a5c22510da13f237e23e9fdd7488eecce1dc"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.612759 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.617340 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" event={"ID":"81b26c9f-a443-43c7-bebe-4f5d9b5261b5","Type":"ContainerStarted","Data":"221b8a3c99dde14e6c471154470b2efec78c424b68e8bab73542e92db46398ec"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.619567 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.620357 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vhbp8" event={"ID":"e6c3c3e6-62ba-4301-bd4a-f5cafd385463","Type":"ContainerDied","Data":"d9c56dbc24de65b361db1b57c6f20b8a4def4e40fcc39ebfa8cb714a97e60c59"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.622211 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" event={"ID":"6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad","Type":"ContainerStarted","Data":"c09356943f820876b90fc0f9106d95be5c326fe9cb4871d48e941230cd5f8e53"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.622422 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.624821 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" event={"ID":"bfd220db-47be-491b-bfe7-5962c10c099b","Type":"ContainerStarted","Data":"c9e617cb1e4a36e54ebe7b4f423bfd34357afe74ebf8651f7bf5f804b478bf77"} Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.625812 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.630779 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.631778 4869 scope.go:117] "RemoveContainer" containerID="45fb2495ec39c8ff8a11c7bec4263955f46efd4c6c3c3791d06bb77514f764fc" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.710304 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t" podStartSLOduration=3.509196901 podStartE2EDuration="20.710280386s" podCreationTimestamp="2025-09-29 13:53:29 +0000 UTC" firstStartedPulling="2025-09-29 13:53:31.911911811 +0000 UTC m=+738.352556131" lastFinishedPulling="2025-09-29 13:53:49.112995306 +0000 UTC m=+755.553639616" observedRunningTime="2025-09-29 13:53:49.663676977 +0000 UTC m=+756.104321297" watchObservedRunningTime="2025-09-29 13:53:49.710280386 +0000 UTC m=+756.150924706" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.747021 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" podStartSLOduration=2.566763324 podStartE2EDuration="20.747002156s" podCreationTimestamp="2025-09-29 13:53:29 +0000 UTC" firstStartedPulling="2025-09-29 13:53:30.933782941 +0000 UTC m=+737.374427251" lastFinishedPulling="2025-09-29 13:53:49.114021763 +0000 UTC m=+755.554666083" observedRunningTime="2025-09-29 13:53:49.707689594 +0000 UTC m=+756.148333914" watchObservedRunningTime="2025-09-29 13:53:49.747002156 +0000 UTC m=+756.187646476" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.756109 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"] Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.762692 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7467k"] Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.781932 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-cc5f78dfc-527r2" podStartSLOduration=2.312691661 podStartE2EDuration="20.781902857s" podCreationTimestamp="2025-09-29 13:53:29 +0000 UTC" firstStartedPulling="2025-09-29 13:53:30.654480212 +0000 UTC m=+737.095124532" lastFinishedPulling="2025-09-29 13:53:49.123691408 +0000 UTC m=+755.564335728" observedRunningTime="2025-09-29 13:53:49.778938829 +0000 UTC m=+756.219583149" watchObservedRunningTime="2025-09-29 13:53:49.781902857 +0000 UTC m=+756.222547187" Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.812401 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"] Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.816485 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vhbp8"] Sep 29 13:53:49 crc kubenswrapper[4869]: I0929 13:53:49.947430 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-7gl6v" podStartSLOduration=2.037130042 podStartE2EDuration="20.947407807s" podCreationTimestamp="2025-09-29 13:53:29 +0000 UTC" firstStartedPulling="2025-09-29 13:53:30.162070238 +0000 UTC m=+736.602714558" lastFinishedPulling="2025-09-29 13:53:49.072348003 +0000 UTC m=+755.512992323" observedRunningTime="2025-09-29 13:53:49.941702705 +0000 UTC 
m=+756.382347025" watchObservedRunningTime="2025-09-29 13:53:49.947407807 +0000 UTC m=+756.388052127" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.160588 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm"] Sep 29 13:53:50 crc kubenswrapper[4869]: W0929 13:53:50.172698 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf26442c_81b8_4f2b_a23a_d742f9c65d93.slice/crio-2e3676cf280afcd72ae6c542288b5dc846c3391e13441e0f78b33b0f6788206a WatchSource:0}: Error finding container 2e3676cf280afcd72ae6c542288b5dc846c3391e13441e0f78b33b0f6788206a: Status 404 returned error can't find the container with id 2e3676cf280afcd72ae6c542288b5dc846c3391e13441e0f78b33b0f6788206a Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.256126 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d633942f-d083-44fd-b0e0-1dce9b0fdf0b" path="/var/lib/kubelet/pods/d633942f-d083-44fd-b0e0-1dce9b0fdf0b/volumes" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.257383 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6c3c3e6-62ba-4301-bd4a-f5cafd385463" path="/var/lib/kubelet/pods/e6c3c3e6-62ba-4301-bd4a-f5cafd385463/volumes" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.300018 4869 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.631256 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" event={"ID":"cf26442c-81b8-4f2b-a23a-d742f9c65d93","Type":"ContainerStarted","Data":"8aa745832a01a3afa631c384e8f43349beb90d8af584660aa92db7698dbac315"} Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.631305 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" event={"ID":"cf26442c-81b8-4f2b-a23a-d742f9c65d93","Type":"ContainerStarted","Data":"2e3676cf280afcd72ae6c542288b5dc846c3391e13441e0f78b33b0f6788206a"} Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.634571 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.636278 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" event={"ID":"5d741f06-f7fa-4727-bf0b-4047c52949de","Type":"ContainerStarted","Data":"e2a45ee50c86d035fde2fcfdfcd0604e8ef56001159fd31cbd48569f9660f177"} Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.653982 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" podStartSLOduration=13.653959322 podStartE2EDuration="13.653959322s" podCreationTimestamp="2025-09-29 13:53:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:53:50.652671402 +0000 UTC m=+757.093315722" watchObservedRunningTime="2025-09-29 13:53:50.653959322 +0000 UTC m=+757.094603632" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.656836 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.656902 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.660336 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f46fb56f9-9ltmm" Sep 29 13:53:50 crc kubenswrapper[4869]: I0929 13:53:50.680740 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft" podStartSLOduration=-9223372015.174078 podStartE2EDuration="21.680697102s" podCreationTimestamp="2025-09-29 13:53:29 +0000 UTC" firstStartedPulling="2025-09-29 13:53:31.697499995 +0000 UTC m=+738.138144325" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:53:50.678791211 +0000 UTC m=+757.119435531" watchObservedRunningTime="2025-09-29 13:53:50.680697102 +0000 UTC m=+757.121341422" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.194069 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v"] Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.195541 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.206551 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.206808 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.209154 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.209344 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.209165 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.209207 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.213479 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v"] Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.259416 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edbc803-9317-426f-a580-1a4d15eac9ee-serving-cert\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: 
\"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.259470 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtvj5\" (UniqueName: \"kubernetes.io/projected/7edbc803-9317-426f-a580-1a4d15eac9ee-kube-api-access-rtvj5\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.259566 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-client-ca\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.259622 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-config\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.360453 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-client-ca\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.360873 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-config\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.360984 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edbc803-9317-426f-a580-1a4d15eac9ee-serving-cert\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.361059 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtvj5\" (UniqueName: \"kubernetes.io/projected/7edbc803-9317-426f-a580-1a4d15eac9ee-kube-api-access-rtvj5\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.361724 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-client-ca\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: 
\"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.362178 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edbc803-9317-426f-a580-1a4d15eac9ee-config\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.368633 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edbc803-9317-426f-a580-1a4d15eac9ee-serving-cert\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.380299 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtvj5\" (UniqueName: \"kubernetes.io/projected/7edbc803-9317-426f-a580-1a4d15eac9ee-kube-api-access-rtvj5\") pod \"route-controller-manager-54f98c5b94-tqx8v\" (UID: \"7edbc803-9317-426f-a580-1a4d15eac9ee\") " pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.522417 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:51 crc kubenswrapper[4869]: I0929 13:53:51.871226 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v"] Sep 29 13:53:52 crc kubenswrapper[4869]: I0929 13:53:52.655248 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" event={"ID":"7edbc803-9317-426f-a580-1a4d15eac9ee","Type":"ContainerStarted","Data":"af3abecad165f6eecc7413099bdfa3e3216c08bcf910e5818f21d73133c1b771"} Sep 29 13:53:52 crc kubenswrapper[4869]: I0929 13:53:52.655619 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" event={"ID":"7edbc803-9317-426f-a580-1a4d15eac9ee","Type":"ContainerStarted","Data":"2742b1800aed5cecfa3896b9facc96fdbe13eab8bef31d236f8f7bd3e53e7649"} Sep 29 13:53:52 crc kubenswrapper[4869]: I0929 13:53:52.698351 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" podStartSLOduration=15.698323918 podStartE2EDuration="15.698323918s" podCreationTimestamp="2025-09-29 13:53:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:53:52.695494882 +0000 UTC m=+759.136139212" watchObservedRunningTime="2025-09-29 13:53:52.698323918 +0000 UTC m=+759.138968238" Sep 29 13:53:53 crc kubenswrapper[4869]: I0929 13:53:53.660940 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:53:53 crc kubenswrapper[4869]: I0929 13:53:53.666676 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-54f98c5b94-tqx8v" Sep 29 13:54:00 crc kubenswrapper[4869]: I0929 13:54:00.327862 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-54bc95c9fb-n98tr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.202101 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.205667 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.220535 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.327919 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.328011 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.328068 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrz8c\" (UniqueName: \"kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.429215 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrz8c\" (UniqueName: \"kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.429310 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.429343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.429923 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content\") pod \"community-operators-6c4xr\" (UID: 
\"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.430180 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.453832 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrz8c\" (UniqueName: \"kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c\") pod \"community-operators-6c4xr\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:15 crc kubenswrapper[4869]: I0929 13:54:15.524260 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:16 crc kubenswrapper[4869]: I0929 13:54:16.012834 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:16 crc kubenswrapper[4869]: W0929 13:54:16.020364 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21742ae5_eff5_4d27_b3f1_2ce5c02ab735.slice/crio-7d7420705573599f51dfb261be19d6c6cd25ee45c458e9020d41a0ddc4fb0a37 WatchSource:0}: Error finding container 7d7420705573599f51dfb261be19d6c6cd25ee45c458e9020d41a0ddc4fb0a37: Status 404 returned error can't find the container with id 7d7420705573599f51dfb261be19d6c6cd25ee45c458e9020d41a0ddc4fb0a37 Sep 29 13:54:16 crc kubenswrapper[4869]: I0929 13:54:16.884034 4869 generic.go:334] "Generic (PLEG): container finished" podID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerID="55dc1d9b5bfd21c1f5f8850a1f473f97c8640e7c1e121d92a5679065eb03aeae" exitCode=0 Sep 29 13:54:16 crc kubenswrapper[4869]: I0929 13:54:16.884121 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerDied","Data":"55dc1d9b5bfd21c1f5f8850a1f473f97c8640e7c1e121d92a5679065eb03aeae"} Sep 29 13:54:16 crc kubenswrapper[4869]: I0929 13:54:16.884198 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerStarted","Data":"7d7420705573599f51dfb261be19d6c6cd25ee45c458e9020d41a0ddc4fb0a37"} Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.616384 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9"] Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.617978 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.622716 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.628131 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9"] Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.682630 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.682697 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.682728 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdzhk\" (UniqueName: \"kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.785154 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.785217 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.785254 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdzhk\" (UniqueName: \"kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.785739 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.785854 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.807022 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdzhk\" (UniqueName: \"kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk\") pod \"9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:18 crc kubenswrapper[4869]: I0929 13:54:18.934780 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.434493 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9"] Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.911365 4869 generic.go:334] "Generic (PLEG): container finished" podID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerID="d23797d6cd3e756f50e9d5c2cecd2d4c18617bdd68233b716936e8cb0e714bf7" exitCode=0 Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.911464 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" event={"ID":"d5ba66e2-b35f-49f1-81aa-5b3007724d39","Type":"ContainerDied","Data":"d23797d6cd3e756f50e9d5c2cecd2d4c18617bdd68233b716936e8cb0e714bf7"} Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.911899 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" event={"ID":"d5ba66e2-b35f-49f1-81aa-5b3007724d39","Type":"ContainerStarted","Data":"d5966566d8a86fb740fd8a3b6fca1e62bd3d32b166991053efa74a4eb224f68e"} Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.917131 4869 generic.go:334] "Generic (PLEG): container finished" podID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerID="a3a14d02a1fae08b848d0d00d3012b5dace0e00f97a23dffe86c04a070fd9db1" exitCode=0 Sep 29 13:54:19 crc kubenswrapper[4869]: I0929 13:54:19.917230 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerDied","Data":"a3a14d02a1fae08b848d0d00d3012b5dace0e00f97a23dffe86c04a070fd9db1"} Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.657641 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.657719 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.657773 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.658394 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.658459 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6" gracePeriod=600 Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.924668 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6" exitCode=0 Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.924907 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6"} Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.926411 4869 scope.go:117] "RemoveContainer" containerID="1cb08fbaefe8d34fe916a84832310adb83e37cae49b44f869d7b811ef6701bf9" Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.929337 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerStarted","Data":"f5d5957fbeca013bdb70a06e6901559c66e7c510be64b206434a886df8b49c10"} Sep 29 13:54:20 crc kubenswrapper[4869]: I0929 13:54:20.949951 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6c4xr" podStartSLOduration=2.484291 podStartE2EDuration="5.949932894s" podCreationTimestamp="2025-09-29 13:54:15 +0000 UTC" firstStartedPulling="2025-09-29 13:54:16.886495174 +0000 UTC m=+783.327139504" lastFinishedPulling="2025-09-29 13:54:20.352137078 +0000 UTC m=+786.792781398" observedRunningTime="2025-09-29 13:54:20.946177275 +0000 UTC m=+787.386821595" watchObservedRunningTime="2025-09-29 13:54:20.949932894 +0000 UTC m=+787.390577214" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.573032 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.575747 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.595725 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.730764 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.731169 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.731345 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rfq8\" (UniqueName: \"kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.832562 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rfq8\" (UniqueName: \"kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.832910 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.833043 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.833590 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.833669 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.855340 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2rfq8\" (UniqueName: \"kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8\") pod \"redhat-operators-fkmml\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.901141 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:21 crc kubenswrapper[4869]: I0929 13:54:21.946776 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a"} Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.351559 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.956550 4869 generic.go:334] "Generic (PLEG): container finished" podID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerID="517c46bff70e50da4889b5031813a65e7fee2d583f22dff1415482de9d607202" exitCode=0 Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.956649 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" event={"ID":"d5ba66e2-b35f-49f1-81aa-5b3007724d39","Type":"ContainerDied","Data":"517c46bff70e50da4889b5031813a65e7fee2d583f22dff1415482de9d607202"} Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.961959 4869 generic.go:334] "Generic (PLEG): container finished" podID="7f085714-91ef-4e76-97eb-80ebc4438834" containerID="bfbf81484e15ddc98533ee23d4d4f3c19c5d942df41dc3bf339f942f3ce3bae9" exitCode=0 Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.962070 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerDied","Data":"bfbf81484e15ddc98533ee23d4d4f3c19c5d942df41dc3bf339f942f3ce3bae9"} Sep 29 13:54:22 crc kubenswrapper[4869]: I0929 13:54:22.962090 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerStarted","Data":"735cb5aaba83b7cf000ff26e98ede2191318343f75c63e4adb23edb144fe8fe2"} Sep 29 13:54:23 crc kubenswrapper[4869]: I0929 13:54:23.970940 4869 generic.go:334] "Generic (PLEG): container finished" podID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerID="4e3ab24f75e3a953bb2b7a0006ef8894dd145ee60966574fcdcfa3631d1cf60c" exitCode=0 Sep 29 13:54:23 crc kubenswrapper[4869]: I0929 13:54:23.971029 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" event={"ID":"d5ba66e2-b35f-49f1-81aa-5b3007724d39","Type":"ContainerDied","Data":"4e3ab24f75e3a953bb2b7a0006ef8894dd145ee60966574fcdcfa3631d1cf60c"} Sep 29 13:54:23 crc kubenswrapper[4869]: I0929 13:54:23.984711 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerStarted","Data":"ab3c4034f5c7040caa8137e01ffbd69ce24ac20ea3e1026efad63a04502a82f8"} Sep 29 13:54:24 crc kubenswrapper[4869]: I0929 13:54:24.993953 4869 generic.go:334] "Generic (PLEG): container finished" 
podID="7f085714-91ef-4e76-97eb-80ebc4438834" containerID="ab3c4034f5c7040caa8137e01ffbd69ce24ac20ea3e1026efad63a04502a82f8" exitCode=0 Sep 29 13:54:24 crc kubenswrapper[4869]: I0929 13:54:24.994030 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerDied","Data":"ab3c4034f5c7040caa8137e01ffbd69ce24ac20ea3e1026efad63a04502a82f8"} Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.391979 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.495199 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle\") pod \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.495372 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util\") pod \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.495462 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdzhk\" (UniqueName: \"kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk\") pod \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\" (UID: \"d5ba66e2-b35f-49f1-81aa-5b3007724d39\") " Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.497597 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle" (OuterVolumeSpecName: "bundle") pod "d5ba66e2-b35f-49f1-81aa-5b3007724d39" (UID: "d5ba66e2-b35f-49f1-81aa-5b3007724d39"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.507940 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk" (OuterVolumeSpecName: "kube-api-access-xdzhk") pod "d5ba66e2-b35f-49f1-81aa-5b3007724d39" (UID: "d5ba66e2-b35f-49f1-81aa-5b3007724d39"). InnerVolumeSpecName "kube-api-access-xdzhk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.525060 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.525194 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.590901 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.596725 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdzhk\" (UniqueName: \"kubernetes.io/projected/d5ba66e2-b35f-49f1-81aa-5b3007724d39-kube-api-access-xdzhk\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.596761 4869 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.766677 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util" (OuterVolumeSpecName: "util") pod "d5ba66e2-b35f-49f1-81aa-5b3007724d39" (UID: "d5ba66e2-b35f-49f1-81aa-5b3007724d39"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:25 crc kubenswrapper[4869]: I0929 13:54:25.799386 4869 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d5ba66e2-b35f-49f1-81aa-5b3007724d39-util\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.009753 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" event={"ID":"d5ba66e2-b35f-49f1-81aa-5b3007724d39","Type":"ContainerDied","Data":"d5966566d8a86fb740fd8a3b6fca1e62bd3d32b166991053efa74a4eb224f68e"} Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.010537 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5966566d8a86fb740fd8a3b6fca1e62bd3d32b166991053efa74a4eb224f68e" Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.009843 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9" Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.014137 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerStarted","Data":"8578b612ce1cd4be2f4a7eff42eb5e460797c4e11c1a38e607f63b139346a625"} Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.040738 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fkmml" podStartSLOduration=2.238459015 podStartE2EDuration="5.040715464s" podCreationTimestamp="2025-09-29 13:54:21 +0000 UTC" firstStartedPulling="2025-09-29 13:54:22.973409661 +0000 UTC m=+789.414054001" lastFinishedPulling="2025-09-29 13:54:25.7756661 +0000 UTC m=+792.216310450" observedRunningTime="2025-09-29 13:54:26.038053364 +0000 UTC m=+792.478697684" watchObservedRunningTime="2025-09-29 13:54:26.040715464 +0000 UTC m=+792.481359784" Sep 29 13:54:26 crc kubenswrapper[4869]: I0929 13:54:26.071722 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.876946 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d"] Sep 29 13:54:27 crc kubenswrapper[4869]: E0929 13:54:27.877193 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="pull" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.877204 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="pull" Sep 29 13:54:27 crc kubenswrapper[4869]: E0929 13:54:27.877221 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="extract" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.877227 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="extract" Sep 29 13:54:27 crc kubenswrapper[4869]: E0929 13:54:27.877239 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="util" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.877246 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="util" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.877351 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5ba66e2-b35f-49f1-81aa-5b3007724d39" containerName="extract" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.877855 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.883307 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-v7w75" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.883552 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.883754 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Sep 29 13:54:27 crc kubenswrapper[4869]: I0929 13:54:27.890248 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d"] Sep 29 13:54:28 crc kubenswrapper[4869]: I0929 13:54:28.030532 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4d92\" (UniqueName: \"kubernetes.io/projected/10bcf6b2-b103-4427-a177-b2123f0a942e-kube-api-access-r4d92\") pod \"nmstate-operator-5d6f6cfd66-klk2d\" (UID: \"10bcf6b2-b103-4427-a177-b2123f0a942e\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" Sep 29 13:54:28 crc kubenswrapper[4869]: I0929 13:54:28.132138 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4d92\" (UniqueName: \"kubernetes.io/projected/10bcf6b2-b103-4427-a177-b2123f0a942e-kube-api-access-r4d92\") pod \"nmstate-operator-5d6f6cfd66-klk2d\" (UID: \"10bcf6b2-b103-4427-a177-b2123f0a942e\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" Sep 29 13:54:28 crc kubenswrapper[4869]: I0929 13:54:28.154742 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4d92\" (UniqueName: \"kubernetes.io/projected/10bcf6b2-b103-4427-a177-b2123f0a942e-kube-api-access-r4d92\") pod \"nmstate-operator-5d6f6cfd66-klk2d\" (UID: \"10bcf6b2-b103-4427-a177-b2123f0a942e\") " pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" Sep 29 13:54:28 crc kubenswrapper[4869]: I0929 13:54:28.198867 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" Sep 29 13:54:28 crc kubenswrapper[4869]: I0929 13:54:28.692934 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d"] Sep 29 13:54:28 crc kubenswrapper[4869]: W0929 13:54:28.711950 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10bcf6b2_b103_4427_a177_b2123f0a942e.slice/crio-27d94440e20a61a5e1c40a0644f92425a5fc3e4c84dd0711272f8009f825c280 WatchSource:0}: Error finding container 27d94440e20a61a5e1c40a0644f92425a5fc3e4c84dd0711272f8009f825c280: Status 404 returned error can't find the container with id 27d94440e20a61a5e1c40a0644f92425a5fc3e4c84dd0711272f8009f825c280 Sep 29 13:54:29 crc kubenswrapper[4869]: I0929 13:54:29.034214 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" event={"ID":"10bcf6b2-b103-4427-a177-b2123f0a942e","Type":"ContainerStarted","Data":"27d94440e20a61a5e1c40a0644f92425a5fc3e4c84dd0711272f8009f825c280"} Sep 29 13:54:29 crc kubenswrapper[4869]: I0929 13:54:29.762338 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:29 crc kubenswrapper[4869]: I0929 13:54:29.762690 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6c4xr" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="registry-server" containerID="cri-o://f5d5957fbeca013bdb70a06e6901559c66e7c510be64b206434a886df8b49c10" gracePeriod=2 Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.048667 4869 generic.go:334] "Generic (PLEG): container finished" podID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerID="f5d5957fbeca013bdb70a06e6901559c66e7c510be64b206434a886df8b49c10" exitCode=0 Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.048727 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerDied","Data":"f5d5957fbeca013bdb70a06e6901559c66e7c510be64b206434a886df8b49c10"} Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.533602 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.682457 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content\") pod \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.682547 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities\") pod \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.682600 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrz8c\" (UniqueName: \"kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c\") pod \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\" (UID: \"21742ae5-eff5-4d27-b3f1-2ce5c02ab735\") " Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.683923 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities" (OuterVolumeSpecName: "utilities") pod "21742ae5-eff5-4d27-b3f1-2ce5c02ab735" (UID: "21742ae5-eff5-4d27-b3f1-2ce5c02ab735"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.691255 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c" (OuterVolumeSpecName: "kube-api-access-nrz8c") pod "21742ae5-eff5-4d27-b3f1-2ce5c02ab735" (UID: "21742ae5-eff5-4d27-b3f1-2ce5c02ab735"). InnerVolumeSpecName "kube-api-access-nrz8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.756575 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "21742ae5-eff5-4d27-b3f1-2ce5c02ab735" (UID: "21742ae5-eff5-4d27-b3f1-2ce5c02ab735"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.784456 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.784812 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.784857 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrz8c\" (UniqueName: \"kubernetes.io/projected/21742ae5-eff5-4d27-b3f1-2ce5c02ab735-kube-api-access-nrz8c\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.902004 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.902489 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:31 crc kubenswrapper[4869]: I0929 13:54:31.946068 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.059020 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" event={"ID":"10bcf6b2-b103-4427-a177-b2123f0a942e","Type":"ContainerStarted","Data":"23012f96304e078917877f5bf16a0a0469391f429abb44450817a778cdd24eb9"} Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.063457 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6c4xr" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.063442 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c4xr" event={"ID":"21742ae5-eff5-4d27-b3f1-2ce5c02ab735","Type":"ContainerDied","Data":"7d7420705573599f51dfb261be19d6c6cd25ee45c458e9020d41a0ddc4fb0a37"} Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.063692 4869 scope.go:117] "RemoveContainer" containerID="f5d5957fbeca013bdb70a06e6901559c66e7c510be64b206434a886df8b49c10" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.081458 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5d6f6cfd66-klk2d" podStartSLOduration=2.23853052 podStartE2EDuration="5.081436781s" podCreationTimestamp="2025-09-29 13:54:27 +0000 UTC" firstStartedPulling="2025-09-29 13:54:28.713912106 +0000 UTC m=+795.154556416" lastFinishedPulling="2025-09-29 13:54:31.556818357 +0000 UTC m=+797.997462677" observedRunningTime="2025-09-29 13:54:32.080397844 +0000 UTC m=+798.521042174" watchObservedRunningTime="2025-09-29 13:54:32.081436781 +0000 UTC m=+798.522081101" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.086574 4869 scope.go:117] "RemoveContainer" containerID="a3a14d02a1fae08b848d0d00d3012b5dace0e00f97a23dffe86c04a070fd9db1" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.109038 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.111594 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6c4xr"] Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.141896 4869 scope.go:117] "RemoveContainer" containerID="55dc1d9b5bfd21c1f5f8850a1f473f97c8640e7c1e121d92a5679065eb03aeae" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.151094 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:32 crc kubenswrapper[4869]: I0929 13:54:32.249846 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" path="/var/lib/kubelet/pods/21742ae5-eff5-4d27-b3f1-2ce5c02ab735/volumes" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.159000 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-xzscn"] Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.159286 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="extract-utilities" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.159304 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="extract-utilities" Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.159326 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="extract-content" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.159334 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="extract-content" Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.159344 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="registry-server" Sep 29 13:54:33 crc 
kubenswrapper[4869]: I0929 13:54:33.159351 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="registry-server" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.159486 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="21742ae5-eff5-4d27-b3f1-2ce5c02ab735" containerName="registry-server" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.160322 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.162449 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-9j6hh" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.180035 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-xzscn"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.184858 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.185924 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.189134 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.202737 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-zckq9"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.203896 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.224052 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306100 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pptdh\" (UniqueName: \"kubernetes.io/projected/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-kube-api-access-pptdh\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306167 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-nmstate-lock\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306320 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-dbus-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306408 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j6gh\" (UniqueName: \"kubernetes.io/projected/785a50f5-f92b-4774-a576-66b1f85cdbab-kube-api-access-7j6gh\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: 
\"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306470 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-ovs-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306541 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.306587 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp46p\" (UniqueName: \"kubernetes.io/projected/4cc9f3a9-a79b-4734-906b-fe99f71ff3ca-kube-api-access-tp46p\") pod \"nmstate-metrics-58fcddf996-xzscn\" (UID: \"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.331750 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.333109 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.336135 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.336397 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-5bp9n" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.336921 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.349376 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.369160 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.370451 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.393307 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.407958 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pptdh\" (UniqueName: \"kubernetes.io/projected/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-kube-api-access-pptdh\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408040 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408074 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c4wd\" (UniqueName: \"kubernetes.io/projected/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-kube-api-access-2c4wd\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408103 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-nmstate-lock\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408143 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-dbus-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408255 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j6gh\" (UniqueName: \"kubernetes.io/projected/785a50f5-f92b-4774-a576-66b1f85cdbab-kube-api-access-7j6gh\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408455 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408498 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-nmstate-lock\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc 
kubenswrapper[4869]: I0929 13:54:33.408476 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-dbus-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408527 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-ovs-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408556 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-ovs-socket\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408648 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.408693 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp46p\" (UniqueName: \"kubernetes.io/projected/4cc9f3a9-a79b-4734-906b-fe99f71ff3ca-kube-api-access-tp46p\") pod \"nmstate-metrics-58fcddf996-xzscn\" (UID: \"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.409180 4869 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.409425 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair podName:785a50f5-f92b-4774-a576-66b1f85cdbab nodeName:}" failed. No retries permitted until 2025-09-29 13:54:33.909393475 +0000 UTC m=+800.350037795 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair") pod "nmstate-webhook-6d689559c5-lrbmp" (UID: "785a50f5-f92b-4774-a576-66b1f85cdbab") : secret "openshift-nmstate-webhook" not found Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.427811 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp46p\" (UniqueName: \"kubernetes.io/projected/4cc9f3a9-a79b-4734-906b-fe99f71ff3ca-kube-api-access-tp46p\") pod \"nmstate-metrics-58fcddf996-xzscn\" (UID: \"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca\") " pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.428324 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j6gh\" (UniqueName: \"kubernetes.io/projected/785a50f5-f92b-4774-a576-66b1f85cdbab-kube-api-access-7j6gh\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.447601 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pptdh\" (UniqueName: \"kubernetes.io/projected/f7cec7ed-cfcf-4e48-8146-259ffff9cebf-kube-api-access-pptdh\") pod \"nmstate-handler-zckq9\" (UID: \"f7cec7ed-cfcf-4e48-8146-259ffff9cebf\") " pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.476446 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.509930 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.510391 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.510530 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.510773 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.510894 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c4wd\" (UniqueName: 
\"kubernetes.io/projected/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-kube-api-access-2c4wd\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.511052 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmvtb\" (UniqueName: \"kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.510158 4869 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Sep 29 13:54:33 crc kubenswrapper[4869]: E0929 13:54:33.511382 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert podName:9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa nodeName:}" failed. No retries permitted until 2025-09-29 13:54:34.011344125 +0000 UTC m=+800.451988455 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert") pod "nmstate-console-plugin-864bb6dfb5-874wj" (UID: "9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa") : secret "plugin-serving-cert" not found Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.512527 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-nginx-conf\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.519153 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.544597 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-78776b8fbf-pxw7x"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.545414 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.557566 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c4wd\" (UniqueName: \"kubernetes.io/projected/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-kube-api-access-2c4wd\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:33 crc kubenswrapper[4869]: W0929 13:54:33.570900 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7cec7ed_cfcf_4e48_8146_259ffff9cebf.slice/crio-baca8f27c4ab0513f755bbdc6d6410f5d75bba3556de30d0c35e33e5121ea03f WatchSource:0}: Error finding container baca8f27c4ab0513f755bbdc6d6410f5d75bba3556de30d0c35e33e5121ea03f: Status 404 returned error can't find the container with id baca8f27c4ab0513f755bbdc6d6410f5d75bba3556de30d0c35e33e5121ea03f Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.571580 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78776b8fbf-pxw7x"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612371 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-service-ca\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612436 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612474 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612508 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612544 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-oauth-config\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612572 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-console-config\") pod \"console-78776b8fbf-pxw7x\" (UID: 
\"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612588 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-trusted-ca-bundle\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612612 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-oauth-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612661 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8pjv\" (UniqueName: \"kubernetes.io/projected/c3087328-1b68-4786-af44-32c9c5332015-kube-api-access-c8pjv\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.612680 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmvtb\" (UniqueName: \"kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.613837 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.614117 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.632544 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmvtb\" (UniqueName: \"kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb\") pod \"certified-operators-4cxrp\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") " pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.689149 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4cxrp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717572 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-service-ca\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717683 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717758 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-oauth-config\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717791 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-console-config\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717813 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-trusted-ca-bundle\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717841 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-oauth-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.717887 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8pjv\" (UniqueName: \"kubernetes.io/projected/c3087328-1b68-4786-af44-32c9c5332015-kube-api-access-c8pjv\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.718518 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-service-ca\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.719076 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-console-config\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " 
pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.721296 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-trusted-ca-bundle\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.723914 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.723947 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3087328-1b68-4786-af44-32c9c5332015-oauth-serving-cert\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.728260 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3087328-1b68-4786-af44-32c9c5332015-console-oauth-config\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.736908 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8pjv\" (UniqueName: \"kubernetes.io/projected/c3087328-1b68-4786-af44-32c9c5332015-kube-api-access-c8pjv\") pod \"console-78776b8fbf-pxw7x\" (UID: \"c3087328-1b68-4786-af44-32c9c5332015\") " pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.828088 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58fcddf996-xzscn"] Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.917924 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-78776b8fbf-pxw7x" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.920333 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.924634 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/785a50f5-f92b-4774-a576-66b1f85cdbab-tls-key-pair\") pod \"nmstate-webhook-6d689559c5-lrbmp\" (UID: \"785a50f5-f92b-4774-a576-66b1f85cdbab\") " pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:33 crc kubenswrapper[4869]: I0929 13:54:33.970466 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"] Sep 29 13:54:34 crc kubenswrapper[4869]: W0929 13:54:34.002830 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a325fbd_9beb_491e_a1ac_db98336e1ceb.slice/crio-942d5e7cad60f712bc39b7baeedc86d0e18e0499140fe924deffb09edd78c265 WatchSource:0}: Error finding container 942d5e7cad60f712bc39b7baeedc86d0e18e0499140fe924deffb09edd78c265: Status 404 returned error can't find the container with id 942d5e7cad60f712bc39b7baeedc86d0e18e0499140fe924deffb09edd78c265 Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.021407 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.024979 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa-plugin-serving-cert\") pod \"nmstate-console-plugin-864bb6dfb5-874wj\" (UID: \"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa\") " pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.088809 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" event={"ID":"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca","Type":"ContainerStarted","Data":"4d1f84025c7c12b6a24c7211e9c1f1b2281222a28e94f1f3c62f94ca01907b77"} Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.099490 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.115095 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-zckq9" event={"ID":"f7cec7ed-cfcf-4e48-8146-259ffff9cebf","Type":"ContainerStarted","Data":"baca8f27c4ab0513f755bbdc6d6410f5d75bba3556de30d0c35e33e5121ea03f"} Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.117277 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerStarted","Data":"942d5e7cad60f712bc39b7baeedc86d0e18e0499140fe924deffb09edd78c265"} Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.254790 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.268876 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78776b8fbf-pxw7x"] Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.504683 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj"] Sep 29 13:54:34 crc kubenswrapper[4869]: I0929 13:54:34.622767 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp"] Sep 29 13:54:34 crc kubenswrapper[4869]: W0929 13:54:34.624813 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod785a50f5_f92b_4774_a576_66b1f85cdbab.slice/crio-7f4f8d429ec6e9277e42373f137313b9e4022aeb88cbc9242545c6e1040145a5 WatchSource:0}: Error finding container 7f4f8d429ec6e9277e42373f137313b9e4022aeb88cbc9242545c6e1040145a5: Status 404 returned error can't find the container with id 7f4f8d429ec6e9277e42373f137313b9e4022aeb88cbc9242545c6e1040145a5 Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.126211 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" event={"ID":"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa","Type":"ContainerStarted","Data":"c1f1501936f8a7fc6380053334ac5cf74164f75b9c86487614962b456847c50f"} Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.128020 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78776b8fbf-pxw7x" event={"ID":"c3087328-1b68-4786-af44-32c9c5332015","Type":"ContainerStarted","Data":"c1089556e551894933f884c2653edbb0c0b5a0518ff01714ccb366772e680f10"} Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.128043 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78776b8fbf-pxw7x" event={"ID":"c3087328-1b68-4786-af44-32c9c5332015","Type":"ContainerStarted","Data":"d7c949919a2a09efeb4172971ebf63a38a38f709a2e85f1cb03acc25f1f7b074"} Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.130925 4869 generic.go:334] "Generic (PLEG): container finished" podID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerID="2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef" exitCode=0 Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.131034 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerDied","Data":"2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef"} Sep 29 
13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.134814 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" event={"ID":"785a50f5-f92b-4774-a576-66b1f85cdbab","Type":"ContainerStarted","Data":"7f4f8d429ec6e9277e42373f137313b9e4022aeb88cbc9242545c6e1040145a5"} Sep 29 13:54:35 crc kubenswrapper[4869]: I0929 13:54:35.158987 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-78776b8fbf-pxw7x" podStartSLOduration=2.158960543 podStartE2EDuration="2.158960543s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:54:35.155200394 +0000 UTC m=+801.595844724" watchObservedRunningTime="2025-09-29 13:54:35.158960543 +0000 UTC m=+801.599604863" Sep 29 13:54:36 crc kubenswrapper[4869]: I0929 13:54:36.975420 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:36 crc kubenswrapper[4869]: I0929 13:54:36.978586 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fkmml" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="registry-server" containerID="cri-o://8578b612ce1cd4be2f4a7eff42eb5e460797c4e11c1a38e607f63b139346a625" gracePeriod=2 Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.155859 4869 generic.go:334] "Generic (PLEG): container finished" podID="7f085714-91ef-4e76-97eb-80ebc4438834" containerID="8578b612ce1cd4be2f4a7eff42eb5e460797c4e11c1a38e607f63b139346a625" exitCode=0 Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.155923 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerDied","Data":"8578b612ce1cd4be2f4a7eff42eb5e460797c4e11c1a38e607f63b139346a625"} Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.836575 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.996261 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content\") pod \"7f085714-91ef-4e76-97eb-80ebc4438834\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.996454 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities\") pod \"7f085714-91ef-4e76-97eb-80ebc4438834\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.996514 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rfq8\" (UniqueName: \"kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8\") pod \"7f085714-91ef-4e76-97eb-80ebc4438834\" (UID: \"7f085714-91ef-4e76-97eb-80ebc4438834\") " Sep 29 13:54:37 crc kubenswrapper[4869]: I0929 13:54:37.998042 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities" (OuterVolumeSpecName: "utilities") pod "7f085714-91ef-4e76-97eb-80ebc4438834" (UID: "7f085714-91ef-4e76-97eb-80ebc4438834"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.010371 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8" (OuterVolumeSpecName: "kube-api-access-2rfq8") pod "7f085714-91ef-4e76-97eb-80ebc4438834" (UID: "7f085714-91ef-4e76-97eb-80ebc4438834"). InnerVolumeSpecName "kube-api-access-2rfq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.095266 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f085714-91ef-4e76-97eb-80ebc4438834" (UID: "7f085714-91ef-4e76-97eb-80ebc4438834"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.099434 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.099483 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rfq8\" (UniqueName: \"kubernetes.io/projected/7f085714-91ef-4e76-97eb-80ebc4438834-kube-api-access-2rfq8\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.099500 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f085714-91ef-4e76-97eb-80ebc4438834-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.166679 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" event={"ID":"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca","Type":"ContainerStarted","Data":"8d4ca63e07362aaecedcc7cc7e03e0c6b0316ee624354be6688bd04bfce0ac0f"} Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.170988 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" event={"ID":"9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa","Type":"ContainerStarted","Data":"c36eb1d3076716266ab7cc14876e9bee88fdd2e8e61c1233a689185f542645cb"} Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.176216 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerStarted","Data":"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"} Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.178008 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" event={"ID":"785a50f5-f92b-4774-a576-66b1f85cdbab","Type":"ContainerStarted","Data":"40e1713fd9d95cacb78686d10f5b0b79da8ba9e089081834537f3b968a478084"} Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.178370 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.185262 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fkmml" event={"ID":"7f085714-91ef-4e76-97eb-80ebc4438834","Type":"ContainerDied","Data":"735cb5aaba83b7cf000ff26e98ede2191318343f75c63e4adb23edb144fe8fe2"} Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.185308 4869 scope.go:117] "RemoveContainer" containerID="8578b612ce1cd4be2f4a7eff42eb5e460797c4e11c1a38e607f63b139346a625" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.185430 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fkmml" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.197204 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-864bb6dfb5-874wj" podStartSLOduration=1.833557286 podStartE2EDuration="5.197175838s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="2025-09-29 13:54:34.515798461 +0000 UTC m=+800.956442771" lastFinishedPulling="2025-09-29 13:54:37.879417003 +0000 UTC m=+804.320061323" observedRunningTime="2025-09-29 13:54:38.19195242 +0000 UTC m=+804.632596760" watchObservedRunningTime="2025-09-29 13:54:38.197175838 +0000 UTC m=+804.637820168" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.229830 4869 scope.go:117] "RemoveContainer" containerID="ab3c4034f5c7040caa8137e01ffbd69ce24ac20ea3e1026efad63a04502a82f8" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.244439 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp" podStartSLOduration=1.989006289 podStartE2EDuration="5.244414175s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="2025-09-29 13:54:34.626482202 +0000 UTC m=+801.067126522" lastFinishedPulling="2025-09-29 13:54:37.881890058 +0000 UTC m=+804.322534408" observedRunningTime="2025-09-29 13:54:38.238469308 +0000 UTC m=+804.679113628" watchObservedRunningTime="2025-09-29 13:54:38.244414175 +0000 UTC m=+804.685058495" Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.268775 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.268827 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fkmml"] Sep 29 13:54:38 crc kubenswrapper[4869]: I0929 13:54:38.274213 4869 scope.go:117] "RemoveContainer" containerID="bfbf81484e15ddc98533ee23d4d4f3c19c5d942df41dc3bf339f942f3ce3bae9" Sep 29 13:54:39 crc kubenswrapper[4869]: I0929 13:54:39.197314 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-zckq9" event={"ID":"f7cec7ed-cfcf-4e48-8146-259ffff9cebf","Type":"ContainerStarted","Data":"3d38f80a6aca76df31d7520fa21095dae45308ffb1614f66f8c53f18bc8065e9"} Sep 29 13:54:39 crc kubenswrapper[4869]: I0929 13:54:39.198069 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-zckq9" Sep 29 13:54:39 crc kubenswrapper[4869]: I0929 13:54:39.202437 4869 generic.go:334] "Generic (PLEG): container finished" podID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerID="92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2" exitCode=0 Sep 29 13:54:39 crc kubenswrapper[4869]: I0929 13:54:39.202634 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerDied","Data":"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"} Sep 29 13:54:39 crc kubenswrapper[4869]: I0929 13:54:39.246648 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-zckq9" podStartSLOduration=2.042732628 podStartE2EDuration="6.246571141s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="2025-09-29 13:54:33.58237836 +0000 UTC m=+800.023022670" lastFinishedPulling="2025-09-29 13:54:37.786216863 
+0000 UTC m=+804.226861183" observedRunningTime="2025-09-29 13:54:39.221966442 +0000 UTC m=+805.662610762" watchObservedRunningTime="2025-09-29 13:54:39.246571141 +0000 UTC m=+805.687215471"
Sep 29 13:54:40 crc kubenswrapper[4869]: I0929 13:54:40.215130 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerStarted","Data":"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"}
Sep 29 13:54:40 crc kubenswrapper[4869]: I0929 13:54:40.241320 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4cxrp" podStartSLOduration=2.7205432529999998 podStartE2EDuration="7.241290231s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="2025-09-29 13:54:35.133003178 +0000 UTC m=+801.573647498" lastFinishedPulling="2025-09-29 13:54:39.653750156 +0000 UTC m=+806.094394476" observedRunningTime="2025-09-29 13:54:40.238155118 +0000 UTC m=+806.678799478" watchObservedRunningTime="2025-09-29 13:54:40.241290231 +0000 UTC m=+806.681934561"
Sep 29 13:54:40 crc kubenswrapper[4869]: I0929 13:54:40.255370 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" path="/var/lib/kubelet/pods/7f085714-91ef-4e76-97eb-80ebc4438834/volumes"
Sep 29 13:54:42 crc kubenswrapper[4869]: I0929 13:54:42.230775 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" event={"ID":"4cc9f3a9-a79b-4734-906b-fe99f71ff3ca","Type":"ContainerStarted","Data":"019ed0af67eb00d246812cb5abbd4e50d4c42c1d2e4e01cad063a28eeb49f2f8"}
Sep 29 13:54:42 crc kubenswrapper[4869]: I0929 13:54:42.255892 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-58fcddf996-xzscn" podStartSLOduration=1.720013011 podStartE2EDuration="9.255862993s" podCreationTimestamp="2025-09-29 13:54:33 +0000 UTC" firstStartedPulling="2025-09-29 13:54:33.841829226 +0000 UTC m=+800.282473546" lastFinishedPulling="2025-09-29 13:54:41.377679198 +0000 UTC m=+807.818323528" observedRunningTime="2025-09-29 13:54:42.255542324 +0000 UTC m=+808.696186674" watchObservedRunningTime="2025-09-29 13:54:42.255862993 +0000 UTC m=+808.696507333"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.562410 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-zckq9"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.690455 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.690535 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.743748 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.918924 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-78776b8fbf-pxw7x"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.919044 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-78776b8fbf-pxw7x"
Sep 29 13:54:43 crc kubenswrapper[4869]: I0929 13:54:43.926021 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-78776b8fbf-pxw7x"
Sep 29 13:54:44 crc kubenswrapper[4869]: I0929 13:54:44.262380 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-78776b8fbf-pxw7x"
Sep 29 13:54:44 crc kubenswrapper[4869]: I0929 13:54:44.303169 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:44 crc kubenswrapper[4869]: I0929 13:54:44.325273 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"]
Sep 29 13:54:44 crc kubenswrapper[4869]: I0929 13:54:44.762307 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"]
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.263206 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4cxrp" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="registry-server" containerID="cri-o://e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c" gracePeriod=2
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.839628 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.952867 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities\") pod \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") "
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.952985 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content\") pod \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") "
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.953653 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmvtb\" (UniqueName: \"kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb\") pod \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\" (UID: \"9a325fbd-9beb-491e-a1ac-db98336e1ceb\") "
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.954507 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities" (OuterVolumeSpecName: "utilities") pod "9a325fbd-9beb-491e-a1ac-db98336e1ceb" (UID: "9a325fbd-9beb-491e-a1ac-db98336e1ceb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:54:46 crc kubenswrapper[4869]: I0929 13:54:46.959954 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb" (OuterVolumeSpecName: "kube-api-access-tmvtb") pod "9a325fbd-9beb-491e-a1ac-db98336e1ceb" (UID: "9a325fbd-9beb-491e-a1ac-db98336e1ceb"). InnerVolumeSpecName "kube-api-access-tmvtb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.009267 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a325fbd-9beb-491e-a1ac-db98336e1ceb" (UID: "9a325fbd-9beb-491e-a1ac-db98336e1ceb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.055318 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.055373 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a325fbd-9beb-491e-a1ac-db98336e1ceb-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.055392 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmvtb\" (UniqueName: \"kubernetes.io/projected/9a325fbd-9beb-491e-a1ac-db98336e1ceb-kube-api-access-tmvtb\") on node \"crc\" DevicePath \"\""
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.271760 4869 generic.go:334] "Generic (PLEG): container finished" podID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerID="e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c" exitCode=0
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.271828 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4cxrp"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.271826 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerDied","Data":"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"}
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.272044 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4cxrp" event={"ID":"9a325fbd-9beb-491e-a1ac-db98336e1ceb","Type":"ContainerDied","Data":"942d5e7cad60f712bc39b7baeedc86d0e18e0499140fe924deffb09edd78c265"}
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.272072 4869 scope.go:117] "RemoveContainer" containerID="e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.307633 4869 scope.go:117] "RemoveContainer" containerID="92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.309163 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"]
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.314808 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4cxrp"]
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.337011 4869 scope.go:117] "RemoveContainer" containerID="2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.354439 4869 scope.go:117] "RemoveContainer" containerID="e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"
Sep 29 13:54:47 crc kubenswrapper[4869]: E0929 13:54:47.355096 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c\": container with ID starting with e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c not found: ID does not exist" containerID="e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.355182 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c"} err="failed to get container status \"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c\": rpc error: code = NotFound desc = could not find container \"e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c\": container with ID starting with e592e3a5f615b4c6e1020d8e47d44170cc44fc8d29cc3fa6a0b997a08dfa878c not found: ID does not exist"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.355253 4869 scope.go:117] "RemoveContainer" containerID="92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"
Sep 29 13:54:47 crc kubenswrapper[4869]: E0929 13:54:47.355824 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2\": container with ID starting with 92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2 not found: ID does not exist" containerID="92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.355871 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2"} err="failed to get container status \"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2\": rpc error: code = NotFound desc = could not find container \"92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2\": container with ID starting with 92b5d75033de0cec54c3e7e6777fdcba469a5f5f999a00d4ebdca1d4d05b7fa2 not found: ID does not exist"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.355903 4869 scope.go:117] "RemoveContainer" containerID="2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef"
Sep 29 13:54:47 crc kubenswrapper[4869]: E0929 13:54:47.356206 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef\": container with ID starting with 2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef not found: ID does not exist" containerID="2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef"
Sep 29 13:54:47 crc kubenswrapper[4869]: I0929 13:54:47.356240 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef"} err="failed to get container status \"2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef\": rpc error: code = NotFound desc = could not find container \"2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef\": container with ID starting with 2f9355ff798e785ba29fc8a1bb2852a325f538046d1fab3f48a493fa17e7b5ef not found: ID does not exist"
Sep 29 13:54:48 crc kubenswrapper[4869]: I0929 13:54:48.250147 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" path="/var/lib/kubelet/pods/9a325fbd-9beb-491e-a1ac-db98336e1ceb/volumes"
Sep 29 13:54:54 crc kubenswrapper[4869]: I0929 13:54:54.108075 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6d689559c5-lrbmp"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.338852 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"]
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339747 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="extract-utilities"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339761 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="extract-utilities"
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339775 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="extract-utilities"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339781 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="extract-utilities"
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339794 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="extract-content"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339801 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="extract-content"
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339814 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339819 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339831 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="extract-content"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339836 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="extract-content"
Sep 29 13:55:09 crc kubenswrapper[4869]: E0929 13:55:09.339851 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339857 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339974 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a325fbd-9beb-491e-a1ac-db98336e1ceb" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.339993 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f085714-91ef-4e76-97eb-80ebc4438834" containerName="registry-server"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.340875 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.343163 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.352986 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"]
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.380746 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-m2zpm" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerName="console" containerID="cri-o://a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e" gracePeriod=15
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.506680 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.506737 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.506803 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m28gj\" (UniqueName: \"kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.608462 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m28gj\" (UniqueName: \"kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.608573 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.608636 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.609374 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.609649 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.652764 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m28gj\" (UniqueName: \"kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj\") pod \"f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") " pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.676347 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.764578 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m2zpm_ac14296d-ce5b-4b73-84f6-3f39e3280f26/console/0.log"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.765112 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.914567 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.914757 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.914821 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.914918 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.914944 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.915007 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsqbw\" (UniqueName: \"kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.915046 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert\") pod \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\" (UID: \"ac14296d-ce5b-4b73-84f6-3f39e3280f26\") "
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.916596 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.917179 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca" (OuterVolumeSpecName: "service-ca") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.918911 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.919216 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config" (OuterVolumeSpecName: "console-config") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.931275 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.932201 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw" (OuterVolumeSpecName: "kube-api-access-qsqbw") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "kube-api-access-qsqbw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:55:09 crc kubenswrapper[4869]: I0929 13:55:09.932550 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ac14296d-ce5b-4b73-84f6-3f39e3280f26" (UID: "ac14296d-ce5b-4b73-84f6-3f39e3280f26"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016717 4869 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016773 4869 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-config\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016788 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsqbw\" (UniqueName: \"kubernetes.io/projected/ac14296d-ce5b-4b73-84f6-3f39e3280f26-kube-api-access-qsqbw\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016800 4869 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016812 4869 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-service-ca\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016822 4869 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ac14296d-ce5b-4b73-84f6-3f39e3280f26-console-oauth-config\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.016832 4869 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac14296d-ce5b-4b73-84f6-3f39e3280f26-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.037516 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"]
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.448816 4869 generic.go:334] "Generic (PLEG): container finished" podID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerID="af1fe3903d3dccd9d1df51b2a385b54ac49d2235b42c35b5428f87ae3968665b" exitCode=0
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.448909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht" event={"ID":"4233fe38-bc5e-4f0e-a692-a14ef606c9ca","Type":"ContainerDied","Data":"af1fe3903d3dccd9d1df51b2a385b54ac49d2235b42c35b5428f87ae3968665b"}
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.449212 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht" event={"ID":"4233fe38-bc5e-4f0e-a692-a14ef606c9ca","Type":"ContainerStarted","Data":"8831ab27dcb926faa9b6ab8a03fa0f012e4a84f2a463d7c6f6bf747cfc33db91"}
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453201 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m2zpm_ac14296d-ce5b-4b73-84f6-3f39e3280f26/console/0.log"
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453257 4869 generic.go:334] "Generic (PLEG): container finished" podID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerID="a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e" exitCode=2
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453295 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m2zpm" event={"ID":"ac14296d-ce5b-4b73-84f6-3f39e3280f26","Type":"ContainerDied","Data":"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"}
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453331 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m2zpm" event={"ID":"ac14296d-ce5b-4b73-84f6-3f39e3280f26","Type":"ContainerDied","Data":"c3b0d79821e77a98dadd11dbb1e5fc893bd7e363a19e5e1c515e88ebe2c145b3"}
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453356 4869 scope.go:117] "RemoveContainer" containerID="a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.453565 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m2zpm"
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.478701 4869 scope.go:117] "RemoveContainer" containerID="a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"
Sep 29 13:55:10 crc kubenswrapper[4869]: E0929 13:55:10.479402 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e\": container with ID starting with a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e not found: ID does not exist" containerID="a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.479470 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e"} err="failed to get container status \"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e\": rpc error: code = NotFound desc = could not find container \"a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e\": container with ID starting with a804e841e6efe8b3d9801aed3065a0deb3d1877de2ade1731289afdaaad6987e not found: ID does not exist"
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.491312 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"]
Sep 29 13:55:10 crc kubenswrapper[4869]: I0929 13:55:10.496157 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-m2zpm"]
Sep 29 13:55:12 crc kubenswrapper[4869]: I0929 13:55:12.249426 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" path="/var/lib/kubelet/pods/ac14296d-ce5b-4b73-84f6-3f39e3280f26/volumes"
Sep 29 13:55:12 crc kubenswrapper[4869]: I0929 13:55:12.473249 4869 generic.go:334] "Generic (PLEG): container finished" podID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerID="e57abac8abbbf885fc112f9d3a67b42d2c617e29bdbc0e8ed58c4f14486426a7" exitCode=0
Sep 29 13:55:12 crc kubenswrapper[4869]: I0929 13:55:12.473299 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht" event={"ID":"4233fe38-bc5e-4f0e-a692-a14ef606c9ca","Type":"ContainerDied","Data":"e57abac8abbbf885fc112f9d3a67b42d2c617e29bdbc0e8ed58c4f14486426a7"}
Sep 29 13:55:13 crc kubenswrapper[4869]: I0929 13:55:13.484844 4869 generic.go:334] "Generic (PLEG): container finished" podID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerID="6176e509c0c031b85d63aa95183b58b6db987990f298c798bf75d29bf9b9dd8e" exitCode=0
Sep 29 13:55:13 crc kubenswrapper[4869]: I0929 13:55:13.484909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht" event={"ID":"4233fe38-bc5e-4f0e-a692-a14ef606c9ca","Type":"ContainerDied","Data":"6176e509c0c031b85d63aa95183b58b6db987990f298c798bf75d29bf9b9dd8e"}
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.859653 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.991886 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m28gj\" (UniqueName: \"kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj\") pod \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") "
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.992695 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle\") pod \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") "
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.993963 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle" (OuterVolumeSpecName: "bundle") pod "4233fe38-bc5e-4f0e-a692-a14ef606c9ca" (UID: "4233fe38-bc5e-4f0e-a692-a14ef606c9ca"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.994113 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util\") pod \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\" (UID: \"4233fe38-bc5e-4f0e-a692-a14ef606c9ca\") "
Sep 29 13:55:14 crc kubenswrapper[4869]: I0929 13:55:14.997497 4869 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.003092 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj" (OuterVolumeSpecName: "kube-api-access-m28gj") pod "4233fe38-bc5e-4f0e-a692-a14ef606c9ca" (UID: "4233fe38-bc5e-4f0e-a692-a14ef606c9ca"). InnerVolumeSpecName "kube-api-access-m28gj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.009077 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util" (OuterVolumeSpecName: "util") pod "4233fe38-bc5e-4f0e-a692-a14ef606c9ca" (UID: "4233fe38-bc5e-4f0e-a692-a14ef606c9ca"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.099316 4869 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-util\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.099390 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m28gj\" (UniqueName: \"kubernetes.io/projected/4233fe38-bc5e-4f0e-a692-a14ef606c9ca-kube-api-access-m28gj\") on node \"crc\" DevicePath \"\""
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.503371 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht" event={"ID":"4233fe38-bc5e-4f0e-a692-a14ef606c9ca","Type":"ContainerDied","Data":"8831ab27dcb926faa9b6ab8a03fa0f012e4a84f2a463d7c6f6bf747cfc33db91"}
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.503458 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8831ab27dcb926faa9b6ab8a03fa0f012e4a84f2a463d7c6f6bf747cfc33db91"
Sep 29 13:55:15 crc kubenswrapper[4869]: I0929 13:55:15.503480 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.493569 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"]
Sep 29 13:55:25 crc kubenswrapper[4869]: E0929 13:55:25.494180 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="util"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494194 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="util"
Sep 29 13:55:25 crc kubenswrapper[4869]: E0929 13:55:25.494204 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="pull"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494211 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="pull"
Sep 29 13:55:25 crc kubenswrapper[4869]: E0929 13:55:25.494222 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="extract"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494228 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="extract"
Sep 29 13:55:25 crc kubenswrapper[4869]: E0929 13:55:25.494239 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerName="console"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494245 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerName="console"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494353 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4233fe38-bc5e-4f0e-a692-a14ef606c9ca" containerName="extract"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494366 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac14296d-ce5b-4b73-84f6-3f39e3280f26" containerName="console"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.494849 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.497985 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.498148 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-tbgqh"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.498152 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.498076 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.498053 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.511073 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"]
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.661908 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2fxf\" (UniqueName: \"kubernetes.io/projected/a4c44dd4-f417-4b2b-9c69-14ec65edd173-kube-api-access-h2fxf\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.661956 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-apiservice-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.661978 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-webhook-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.763491 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2fxf\" (UniqueName: \"kubernetes.io/projected/a4c44dd4-f417-4b2b-9c69-14ec65edd173-kube-api-access-h2fxf\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.763552 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-apiservice-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.763588 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-webhook-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.772846 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-webhook-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.773299 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4c44dd4-f417-4b2b-9c69-14ec65edd173-apiservice-cert\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.784767 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2fxf\" (UniqueName: \"kubernetes.io/projected/a4c44dd4-f417-4b2b-9c69-14ec65edd173-kube-api-access-h2fxf\") pod \"metallb-operator-controller-manager-6966c7c87b-ck8s9\" (UID: \"a4c44dd4-f417-4b2b-9c69-14ec65edd173\") " pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.812233 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.954673 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"]
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.955602 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.961645 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.961844 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-7m6kv"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.962078 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Sep 29 13:55:25 crc kubenswrapper[4869]: I0929 13:55:25.969973 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"]
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.067834 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-webhook-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.067975 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-apiservice-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.068039 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcnhm\" (UniqueName: \"kubernetes.io/projected/54cee50d-edae-4d32-99e2-83b8f8e5b99c-kube-api-access-mcnhm\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.169182 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcnhm\" (UniqueName: \"kubernetes.io/projected/54cee50d-edae-4d32-99e2-83b8f8e5b99c-kube-api-access-mcnhm\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.169342 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-webhook-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.169391 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-apiservice-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.176581 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-apiservice-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.188255 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54cee50d-edae-4d32-99e2-83b8f8e5b99c-webhook-cert\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.189864 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcnhm\" (UniqueName: \"kubernetes.io/projected/54cee50d-edae-4d32-99e2-83b8f8e5b99c-kube-api-access-mcnhm\") pod \"metallb-operator-webhook-server-7f65f6b848-6v8dg\" (UID: \"54cee50d-edae-4d32-99e2-83b8f8e5b99c\") " pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.308110 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.316704 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"]
Sep 29 13:55:26 crc kubenswrapper[4869]: W0929 13:55:26.330703 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4c44dd4_f417_4b2b_9c69_14ec65edd173.slice/crio-24ef9758e2fa03915d452c3c8cf1a8f2af66723235ccb76cf64cd6b88b3fd87c WatchSource:0}: Error finding container 24ef9758e2fa03915d452c3c8cf1a8f2af66723235ccb76cf64cd6b88b3fd87c: Status 404 returned error can't find the container with id 24ef9758e2fa03915d452c3c8cf1a8f2af66723235ccb76cf64cd6b88b3fd87c
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.575683 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9" event={"ID":"a4c44dd4-f417-4b2b-9c69-14ec65edd173","Type":"ContainerStarted","Data":"24ef9758e2fa03915d452c3c8cf1a8f2af66723235ccb76cf64cd6b88b3fd87c"}
Sep 29 13:55:26 crc kubenswrapper[4869]: I0929 13:55:26.579437 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"]
Sep 29 13:55:26 crc kubenswrapper[4869]: W0929 13:55:26.619228 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54cee50d_edae_4d32_99e2_83b8f8e5b99c.slice/crio-6cd858a0cf66b35f6ba49c09000eec28ad8799ee280099f7b37fd38b508b121c WatchSource:0}: Error finding container 6cd858a0cf66b35f6ba49c09000eec28ad8799ee280099f7b37fd38b508b121c: Status 404 returned error can't find the container with id 6cd858a0cf66b35f6ba49c09000eec28ad8799ee280099f7b37fd38b508b121c
Sep 29 13:55:27 crc kubenswrapper[4869]: I0929 13:55:27.583093 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg" event={"ID":"54cee50d-edae-4d32-99e2-83b8f8e5b99c","Type":"ContainerStarted","Data":"6cd858a0cf66b35f6ba49c09000eec28ad8799ee280099f7b37fd38b508b121c"}
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.631711 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg" event={"ID":"54cee50d-edae-4d32-99e2-83b8f8e5b99c","Type":"ContainerStarted","Data":"38238c65359bb8134ebbce0718e71dd67c3227950480212123a8a7a157fe12e0"}
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.632215 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.634123 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9" event={"ID":"a4c44dd4-f417-4b2b-9c69-14ec65edd173","Type":"ContainerStarted","Data":"b36118cd4d3a30d16ef68a88fbdc1b7b605100c60c66d97aa9db88261015bf60"}
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.634589 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.653467 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg" podStartSLOduration=2.017380777 podStartE2EDuration="8.653446376s" podCreationTimestamp="2025-09-29 13:55:25 +0000 UTC" firstStartedPulling="2025-09-29 13:55:26.622737443 +0000 UTC m=+853.063381763" lastFinishedPulling="2025-09-29 13:55:33.258803042 +0000 UTC m=+859.699447362" observedRunningTime="2025-09-29 13:55:33.651860294 +0000 UTC m=+860.092504634" watchObservedRunningTime="2025-09-29 13:55:33.653446376 +0000 UTC m=+860.094090696"
Sep 29 13:55:33 crc kubenswrapper[4869]: I0929 13:55:33.675894 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9" podStartSLOduration=1.7608327080000001 podStartE2EDuration="8.675866998s" podCreationTimestamp="2025-09-29 13:55:25 +0000 UTC" firstStartedPulling="2025-09-29 13:55:26.333444749 +0000 UTC m=+852.774089069" lastFinishedPulling="2025-09-29 13:55:33.248479029 +0000 UTC m=+859.689123359" observedRunningTime="2025-09-29 13:55:33.673137596 +0000 UTC m=+860.113781916" watchObservedRunningTime="2025-09-29 13:55:33.675866998 +0000 UTC m=+860.116511318"
Sep 29 13:55:46 crc kubenswrapper[4869]: I0929 13:55:46.316688 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7f65f6b848-6v8dg"
Sep 29 13:56:05 crc kubenswrapper[4869]: I0929 13:56:05.815724 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6966c7c87b-ck8s9"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.626272 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-psh4w"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.629745 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.632206 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-qn549"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.632569 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.633960 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.634229 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.635110 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.638088 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.650348 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.740031 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xtbwg"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.741626 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xtbwg"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.746787 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.746968 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.747104 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.747494 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-7z4nb"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.789644 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5d688f5ffc-6lrfv"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.790899 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5d688f5ffc-6lrfv"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.793588 4869 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798282 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-sockets\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-conf\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798356 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmv8h\" (UniqueName: \"kubernetes.io/projected/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-kube-api-access-zmv8h\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798389 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-startup\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798410 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-reloader\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798429 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798459 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd64h\" (UniqueName: \"kubernetes.io/projected/37e44d72-369b-4c65-90f3-73b692729b60-kube-api-access-sd64h\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798477 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/37e44d72-369b-4c65-90f3-73b692729b60-cert\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.798499 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics-certs\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.812534 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5d688f5ffc-6lrfv"]
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899642 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-reloader\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899689 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmn2d\" (UniqueName: \"kubernetes.io/projected/26a588ca-3edb-42f4-b436-eccd007e8cbc-kube-api-access-bmn2d\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899709 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899738 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-cert\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899756 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd64h\" (UniqueName: \"kubernetes.io/projected/37e44d72-369b-4c65-90f3-73b692729b60-kube-api-access-sd64h\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899771 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/37e44d72-369b-4c65-90f3-73b692729b60-cert\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899793 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2s8g\" (UniqueName: \"kubernetes.io/projected/c178702e-60a4-430d-b3e3-58e59663407e-kube-api-access-d2s8g\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899814 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics-certs\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899834 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-metrics-certs\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899866 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-sockets\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899888 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-metrics-certs\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899903 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-conf\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899924 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmv8h\" (UniqueName: \"kubernetes.io/projected/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-kube-api-access-zmv8h\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899945 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899960 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26a588ca-3edb-42f4-b436-eccd007e8cbc-metallb-excludel2\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.899984 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-startup\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.900991 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-startup\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w"
Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.901227 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-reloader\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\")
" pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.901419 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.902817 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-sockets\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.903158 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-frr-conf\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.928402 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/37e44d72-369b-4c65-90f3-73b692729b60-cert\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.928829 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-metrics-certs\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.936720 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd64h\" (UniqueName: \"kubernetes.io/projected/37e44d72-369b-4c65-90f3-73b692729b60-kube-api-access-sd64h\") pod \"frr-k8s-webhook-server-5478bdb765-6fj4l\" (UID: \"37e44d72-369b-4c65-90f3-73b692729b60\") " pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.938177 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmv8h\" (UniqueName: \"kubernetes.io/projected/1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01-kube-api-access-zmv8h\") pod \"frr-k8s-psh4w\" (UID: \"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01\") " pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.954210 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:06 crc kubenswrapper[4869]: I0929 13:56:06.967923 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001400 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmn2d\" (UniqueName: \"kubernetes.io/projected/26a588ca-3edb-42f4-b436-eccd007e8cbc-kube-api-access-bmn2d\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001472 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-cert\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001498 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2s8g\" (UniqueName: \"kubernetes.io/projected/c178702e-60a4-430d-b3e3-58e59663407e-kube-api-access-d2s8g\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001532 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-metrics-certs\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001567 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-metrics-certs\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001600 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.001633 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26a588ca-3edb-42f4-b436-eccd007e8cbc-metallb-excludel2\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.002428 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/26a588ca-3edb-42f4-b436-eccd007e8cbc-metallb-excludel2\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: E0929 13:56:07.004873 4869 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Sep 29 13:56:07 crc kubenswrapper[4869]: E0929 13:56:07.004949 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist podName:26a588ca-3edb-42f4-b436-eccd007e8cbc nodeName:}" failed. 
No retries permitted until 2025-09-29 13:56:07.504931246 +0000 UTC m=+893.945575566 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist") pod "speaker-xtbwg" (UID: "26a588ca-3edb-42f4-b436-eccd007e8cbc") : secret "metallb-memberlist" not found Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.009734 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-cert\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.010401 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c178702e-60a4-430d-b3e3-58e59663407e-metrics-certs\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.011219 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-metrics-certs\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.033308 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmn2d\" (UniqueName: \"kubernetes.io/projected/26a588ca-3edb-42f4-b436-eccd007e8cbc-kube-api-access-bmn2d\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.035093 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2s8g\" (UniqueName: \"kubernetes.io/projected/c178702e-60a4-430d-b3e3-58e59663407e-kube-api-access-d2s8g\") pod \"controller-5d688f5ffc-6lrfv\" (UID: \"c178702e-60a4-430d-b3e3-58e59663407e\") " pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.129172 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.268690 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l"] Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.393221 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5d688f5ffc-6lrfv"] Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.542057 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:07 crc kubenswrapper[4869]: E0929 13:56:07.542319 4869 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Sep 29 13:56:07 crc kubenswrapper[4869]: E0929 13:56:07.542792 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist podName:26a588ca-3edb-42f4-b436-eccd007e8cbc nodeName:}" failed. 
No retries permitted until 2025-09-29 13:56:08.542737217 +0000 UTC m=+894.983381567 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist") pod "speaker-xtbwg" (UID: "26a588ca-3edb-42f4-b436-eccd007e8cbc") : secret "metallb-memberlist" not found Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.895163 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"3b42f0a71458a945fc2e044684458b839b9007f482bb8ba873a3ee18a6857995"} Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.898662 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-6lrfv" event={"ID":"c178702e-60a4-430d-b3e3-58e59663407e","Type":"ContainerStarted","Data":"b24c615f55d4e3b4457787087b761f214861360fe2128c9e093705fbb6088180"} Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.898789 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-6lrfv" event={"ID":"c178702e-60a4-430d-b3e3-58e59663407e","Type":"ContainerStarted","Data":"51cb924fcbf105072f616475ff979af9047a2b7fa16d7de2343399654c8e4521"} Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.898816 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5d688f5ffc-6lrfv" event={"ID":"c178702e-60a4-430d-b3e3-58e59663407e","Type":"ContainerStarted","Data":"5fb8d4f498b5755a903a8c39a14aaeeae6f9bdfaa37974bd86f324c28b2193fe"} Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.898852 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.900427 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" event={"ID":"37e44d72-369b-4c65-90f3-73b692729b60","Type":"ContainerStarted","Data":"297639ecb60319c6a703d702520823e22d47104677dda44f573a4df550046c97"} Sep 29 13:56:07 crc kubenswrapper[4869]: I0929 13:56:07.932878 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5d688f5ffc-6lrfv" podStartSLOduration=1.932855772 podStartE2EDuration="1.932855772s" podCreationTimestamp="2025-09-29 13:56:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:56:07.930662014 +0000 UTC m=+894.371306354" watchObservedRunningTime="2025-09-29 13:56:07.932855772 +0000 UTC m=+894.373500092" Sep 29 13:56:08 crc kubenswrapper[4869]: I0929 13:56:08.556746 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:08 crc kubenswrapper[4869]: I0929 13:56:08.564960 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/26a588ca-3edb-42f4-b436-eccd007e8cbc-memberlist\") pod \"speaker-xtbwg\" (UID: \"26a588ca-3edb-42f4-b436-eccd007e8cbc\") " pod="metallb-system/speaker-xtbwg" Sep 29 13:56:08 crc kubenswrapper[4869]: I0929 13:56:08.860586 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-xtbwg" Sep 29 13:56:08 crc kubenswrapper[4869]: I0929 13:56:08.909908 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xtbwg" event={"ID":"26a588ca-3edb-42f4-b436-eccd007e8cbc","Type":"ContainerStarted","Data":"c7bcae5d0800abc3d2d2264365facea11374ddd7e6f3b2ffd07a94fe6e20db4d"} Sep 29 13:56:09 crc kubenswrapper[4869]: I0929 13:56:09.918865 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xtbwg" event={"ID":"26a588ca-3edb-42f4-b436-eccd007e8cbc","Type":"ContainerStarted","Data":"8f858fa724d9fa7e96ab302dee2a3a34c74718b8eeed6a418387563d682b760c"} Sep 29 13:56:09 crc kubenswrapper[4869]: I0929 13:56:09.919358 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xtbwg" event={"ID":"26a588ca-3edb-42f4-b436-eccd007e8cbc","Type":"ContainerStarted","Data":"d6e277da00a3c19386a7eca560d042bf03dd782f768187c3703361dd3ef92227"} Sep 29 13:56:09 crc kubenswrapper[4869]: I0929 13:56:09.919574 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xtbwg" Sep 29 13:56:09 crc kubenswrapper[4869]: I0929 13:56:09.950699 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xtbwg" podStartSLOduration=3.9506716710000003 podStartE2EDuration="3.950671671s" podCreationTimestamp="2025-09-29 13:56:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:56:09.946236904 +0000 UTC m=+896.386881224" watchObservedRunningTime="2025-09-29 13:56:09.950671671 +0000 UTC m=+896.391316001" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.545336 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.549498 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.556094 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.623143 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.623207 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.623259 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69hrr\" (UniqueName: \"kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.724431 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.724498 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.724551 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69hrr\" (UniqueName: \"kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.725081 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.725165 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.755388 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-69hrr\" (UniqueName: \"kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr\") pod \"redhat-marketplace-8bppk\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:12 crc kubenswrapper[4869]: I0929 13:56:12.874560 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.258789 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:15 crc kubenswrapper[4869]: W0929 13:56:15.276871 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42efba91_3590_46dc_bd79_4db39a016a70.slice/crio-a2f92093d40af6933ab034f4acebbe55f9d036910b557f9bcdbf8e90bd44103f WatchSource:0}: Error finding container a2f92093d40af6933ab034f4acebbe55f9d036910b557f9bcdbf8e90bd44103f: Status 404 returned error can't find the container with id a2f92093d40af6933ab034f4acebbe55f9d036910b557f9bcdbf8e90bd44103f Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.981607 4869 generic.go:334] "Generic (PLEG): container finished" podID="1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01" containerID="7b9e53e68542157952a6a9ca210844ca906b2ee93e51a225e0fa49ab6995eff9" exitCode=0 Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.981950 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerDied","Data":"7b9e53e68542157952a6a9ca210844ca906b2ee93e51a225e0fa49ab6995eff9"} Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.986469 4869 generic.go:334] "Generic (PLEG): container finished" podID="42efba91-3590-46dc-bd79-4db39a016a70" containerID="16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6" exitCode=0 Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.986895 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerDied","Data":"16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6"} Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.987024 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerStarted","Data":"a2f92093d40af6933ab034f4acebbe55f9d036910b557f9bcdbf8e90bd44103f"} Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.989454 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" event={"ID":"37e44d72-369b-4c65-90f3-73b692729b60","Type":"ContainerStarted","Data":"4a7c935cf7fc4e27a51b14ac9a5142cbeb43425982d461f21a21815c800ab50d"} Sep 29 13:56:15 crc kubenswrapper[4869]: I0929 13:56:15.990037 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" Sep 29 13:56:16 crc kubenswrapper[4869]: I0929 13:56:16.028049 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" podStartSLOduration=2.221643412 podStartE2EDuration="10.028022925s" podCreationTimestamp="2025-09-29 13:56:06 +0000 UTC" firstStartedPulling="2025-09-29 13:56:07.287867601 +0000 UTC 
m=+893.728511921" lastFinishedPulling="2025-09-29 13:56:15.094247114 +0000 UTC m=+901.534891434" observedRunningTime="2025-09-29 13:56:16.026014942 +0000 UTC m=+902.466659292" watchObservedRunningTime="2025-09-29 13:56:16.028022925 +0000 UTC m=+902.468667245" Sep 29 13:56:16 crc kubenswrapper[4869]: I0929 13:56:16.997756 4869 generic.go:334] "Generic (PLEG): container finished" podID="42efba91-3590-46dc-bd79-4db39a016a70" containerID="a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef" exitCode=0 Sep 29 13:56:16 crc kubenswrapper[4869]: I0929 13:56:16.997816 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerDied","Data":"a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef"} Sep 29 13:56:17 crc kubenswrapper[4869]: I0929 13:56:17.001527 4869 generic.go:334] "Generic (PLEG): container finished" podID="1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01" containerID="12c301e6dff2c76ec1ad6e48b9d223439e5f760eb921ec464f08b5b70bae0805" exitCode=0 Sep 29 13:56:17 crc kubenswrapper[4869]: I0929 13:56:17.002378 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerDied","Data":"12c301e6dff2c76ec1ad6e48b9d223439e5f760eb921ec464f08b5b70bae0805"} Sep 29 13:56:17 crc kubenswrapper[4869]: I0929 13:56:17.136053 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5d688f5ffc-6lrfv" Sep 29 13:56:18 crc kubenswrapper[4869]: I0929 13:56:18.012901 4869 generic.go:334] "Generic (PLEG): container finished" podID="1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01" containerID="6cb266f829024ba7bf8e995d2cc95601fe03a8c1a0f5a79e72bf377548066aa6" exitCode=0 Sep 29 13:56:18 crc kubenswrapper[4869]: I0929 13:56:18.013003 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerDied","Data":"6cb266f829024ba7bf8e995d2cc95601fe03a8c1a0f5a79e72bf377548066aa6"} Sep 29 13:56:18 crc kubenswrapper[4869]: I0929 13:56:18.023530 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerStarted","Data":"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0"} Sep 29 13:56:18 crc kubenswrapper[4869]: I0929 13:56:18.087078 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8bppk" podStartSLOduration=4.601516228 podStartE2EDuration="6.08704386s" podCreationTimestamp="2025-09-29 13:56:12 +0000 UTC" firstStartedPulling="2025-09-29 13:56:15.989668273 +0000 UTC m=+902.430312603" lastFinishedPulling="2025-09-29 13:56:17.475195885 +0000 UTC m=+903.915840235" observedRunningTime="2025-09-29 13:56:18.078632308 +0000 UTC m=+904.519276658" watchObservedRunningTime="2025-09-29 13:56:18.08704386 +0000 UTC m=+904.527688220" Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.037583 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"53bccc621999ae52a8929247d7ca20f1869f62f574ff21e2f021f6b5ede8a1a8"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038128 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038143 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"10a3f363b098afbc3af8f995bc4ddecb59ace7d5f18bd155c013e665612bb654"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038153 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"bb7a9fd4ba89f458a612215a9af6505edddb78ce3b6925545e57b9f07a891741"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038162 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"1e84a52e7be4f11c0835b8ff921089f1e7bed0cf7675faea18ab2162ba630136"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038172 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"8a900827b593bbd8132ee549ea68d72c7adaa820d6fcbda024c626b0c5a7c997"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.038183 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-psh4w" event={"ID":"1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01","Type":"ContainerStarted","Data":"4dce12e40b30d7c34f8304b2919c560e202f56e6405307d57fe33338c6dfe0a3"} Sep 29 13:56:19 crc kubenswrapper[4869]: I0929 13:56:19.062513 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-psh4w" podStartSLOduration=5.155176155 podStartE2EDuration="13.062487471s" podCreationTimestamp="2025-09-29 13:56:06 +0000 UTC" firstStartedPulling="2025-09-29 13:56:07.210788357 +0000 UTC m=+893.651432677" lastFinishedPulling="2025-09-29 13:56:15.118099673 +0000 UTC m=+901.558743993" observedRunningTime="2025-09-29 13:56:19.058311281 +0000 UTC m=+905.498955601" watchObservedRunningTime="2025-09-29 13:56:19.062487471 +0000 UTC m=+905.503131791" Sep 29 13:56:20 crc kubenswrapper[4869]: I0929 13:56:20.657539 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:56:20 crc kubenswrapper[4869]: I0929 13:56:20.657696 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:56:21 crc kubenswrapper[4869]: I0929 13:56:21.955296 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:21 crc kubenswrapper[4869]: I0929 13:56:21.998229 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:22 crc kubenswrapper[4869]: I0929 13:56:22.875073 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:22 crc kubenswrapper[4869]: I0929 13:56:22.875169 4869 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:22 crc kubenswrapper[4869]: I0929 13:56:22.935566 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:23 crc kubenswrapper[4869]: I0929 13:56:23.113198 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:23 crc kubenswrapper[4869]: I0929 13:56:23.178487 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.090059 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8bppk" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="registry-server" containerID="cri-o://b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0" gracePeriod=2 Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.521188 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.536139 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69hrr\" (UniqueName: \"kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr\") pod \"42efba91-3590-46dc-bd79-4db39a016a70\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.536222 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities\") pod \"42efba91-3590-46dc-bd79-4db39a016a70\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.536334 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content\") pod \"42efba91-3590-46dc-bd79-4db39a016a70\" (UID: \"42efba91-3590-46dc-bd79-4db39a016a70\") " Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.540116 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities" (OuterVolumeSpecName: "utilities") pod "42efba91-3590-46dc-bd79-4db39a016a70" (UID: "42efba91-3590-46dc-bd79-4db39a016a70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.550160 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr" (OuterVolumeSpecName: "kube-api-access-69hrr") pod "42efba91-3590-46dc-bd79-4db39a016a70" (UID: "42efba91-3590-46dc-bd79-4db39a016a70"). InnerVolumeSpecName "kube-api-access-69hrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.554635 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "42efba91-3590-46dc-bd79-4db39a016a70" (UID: "42efba91-3590-46dc-bd79-4db39a016a70"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.638996 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.639062 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69hrr\" (UniqueName: \"kubernetes.io/projected/42efba91-3590-46dc-bd79-4db39a016a70-kube-api-access-69hrr\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:25 crc kubenswrapper[4869]: I0929 13:56:25.639081 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42efba91-3590-46dc-bd79-4db39a016a70-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.103115 4869 generic.go:334] "Generic (PLEG): container finished" podID="42efba91-3590-46dc-bd79-4db39a016a70" containerID="b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0" exitCode=0 Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.103210 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerDied","Data":"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0"} Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.103290 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8bppk" event={"ID":"42efba91-3590-46dc-bd79-4db39a016a70","Type":"ContainerDied","Data":"a2f92093d40af6933ab034f4acebbe55f9d036910b557f9bcdbf8e90bd44103f"} Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.103318 4869 scope.go:117] "RemoveContainer" containerID="b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.103384 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8bppk" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.132437 4869 scope.go:117] "RemoveContainer" containerID="a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.159749 4869 scope.go:117] "RemoveContainer" containerID="16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.168542 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.179925 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8bppk"] Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.185482 4869 scope.go:117] "RemoveContainer" containerID="b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0" Sep 29 13:56:26 crc kubenswrapper[4869]: E0929 13:56:26.186233 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0\": container with ID starting with b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0 not found: ID does not exist" containerID="b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.186383 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0"} err="failed to get container status \"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0\": rpc error: code = NotFound desc = could not find container \"b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0\": container with ID starting with b84e360583be2bb736424d98373e5c7d10e3fb7a029a2f527a291ad46a2644e0 not found: ID does not exist" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.186482 4869 scope.go:117] "RemoveContainer" containerID="a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef" Sep 29 13:56:26 crc kubenswrapper[4869]: E0929 13:56:26.186999 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef\": container with ID starting with a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef not found: ID does not exist" containerID="a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.187093 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef"} err="failed to get container status \"a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef\": rpc error: code = NotFound desc = could not find container \"a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef\": container with ID starting with a341e127b01fa9a89ea5cda83a7a1be62c18d1843e021fe93d51d129fc2cf5ef not found: ID does not exist" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.187149 4869 scope.go:117] "RemoveContainer" containerID="16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6" Sep 29 13:56:26 crc kubenswrapper[4869]: E0929 13:56:26.187454 4869 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6\": container with ID starting with 16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6 not found: ID does not exist" containerID="16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.187545 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6"} err="failed to get container status \"16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6\": rpc error: code = NotFound desc = could not find container \"16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6\": container with ID starting with 16c2bd18bb671115670be7749cb46f21252d5f917452e8ce6be9e81320468ab6 not found: ID does not exist" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.256843 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42efba91-3590-46dc-bd79-4db39a016a70" path="/var/lib/kubelet/pods/42efba91-3590-46dc-bd79-4db39a016a70/volumes" Sep 29 13:56:26 crc kubenswrapper[4869]: I0929 13:56:26.973285 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-5478bdb765-6fj4l" Sep 29 13:56:28 crc kubenswrapper[4869]: I0929 13:56:28.865786 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xtbwg" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.171932 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:32 crc kubenswrapper[4869]: E0929 13:56:32.173130 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="extract-content" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.173152 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="extract-content" Sep 29 13:56:32 crc kubenswrapper[4869]: E0929 13:56:32.173175 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="registry-server" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.173184 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="registry-server" Sep 29 13:56:32 crc kubenswrapper[4869]: E0929 13:56:32.173195 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="extract-utilities" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.173203 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="extract-utilities" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.173356 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="42efba91-3590-46dc-bd79-4db39a016a70" containerName="registry-server" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.173996 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.223249 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.223686 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.223969 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.226316 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-qsfxd" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.242054 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h724b\" (UniqueName: \"kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b\") pod \"openstack-operator-index-nbpn4\" (UID: \"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1\") " pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.342735 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h724b\" (UniqueName: \"kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b\") pod \"openstack-operator-index-nbpn4\" (UID: \"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1\") " pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.364568 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h724b\" (UniqueName: \"kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b\") pod \"openstack-operator-index-nbpn4\" (UID: \"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1\") " pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.547942 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:32 crc kubenswrapper[4869]: I0929 13:56:32.962687 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:33 crc kubenswrapper[4869]: I0929 13:56:33.157355 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nbpn4" event={"ID":"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1","Type":"ContainerStarted","Data":"dece7db0605f519c96b6aeb35092f32864402985f97630455a28ba5d9f5ac6ef"} Sep 29 13:56:35 crc kubenswrapper[4869]: I0929 13:56:35.184925 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nbpn4" event={"ID":"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1","Type":"ContainerStarted","Data":"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca"} Sep 29 13:56:35 crc kubenswrapper[4869]: I0929 13:56:35.211985 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-nbpn4" podStartSLOduration=1.180959816 podStartE2EDuration="3.211960405s" podCreationTimestamp="2025-09-29 13:56:32 +0000 UTC" firstStartedPulling="2025-09-29 13:56:32.970969386 +0000 UTC m=+919.411613706" lastFinishedPulling="2025-09-29 13:56:35.001969975 +0000 UTC m=+921.442614295" observedRunningTime="2025-09-29 13:56:35.207691419 +0000 UTC m=+921.648335779" watchObservedRunningTime="2025-09-29 13:56:35.211960405 +0000 UTC m=+921.652604725" Sep 29 13:56:35 crc kubenswrapper[4869]: I0929 13:56:35.545392 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.156816 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-rpv4c"] Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.159397 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.171336 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rpv4c"] Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.204298 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67bxr\" (UniqueName: \"kubernetes.io/projected/fe014b15-35d6-4091-aece-db0abc23b4a9-kube-api-access-67bxr\") pod \"openstack-operator-index-rpv4c\" (UID: \"fe014b15-35d6-4091-aece-db0abc23b4a9\") " pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.306225 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67bxr\" (UniqueName: \"kubernetes.io/projected/fe014b15-35d6-4091-aece-db0abc23b4a9-kube-api-access-67bxr\") pod \"openstack-operator-index-rpv4c\" (UID: \"fe014b15-35d6-4091-aece-db0abc23b4a9\") " pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.343724 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67bxr\" (UniqueName: \"kubernetes.io/projected/fe014b15-35d6-4091-aece-db0abc23b4a9-kube-api-access-67bxr\") pod \"openstack-operator-index-rpv4c\" (UID: \"fe014b15-35d6-4091-aece-db0abc23b4a9\") " pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.505590 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.950219 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rpv4c"] Sep 29 13:56:36 crc kubenswrapper[4869]: I0929 13:56:36.965042 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-psh4w" Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.207731 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rpv4c" event={"ID":"fe014b15-35d6-4091-aece-db0abc23b4a9","Type":"ContainerStarted","Data":"a5a4f315ad4226e2363e19d38c798413e5d060943c96b001eed4fd3dfae4c704"} Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.207868 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-nbpn4" podUID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" containerName="registry-server" containerID="cri-o://c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca" gracePeriod=2 Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.609250 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.630250 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h724b\" (UniqueName: \"kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b\") pod \"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1\" (UID: \"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1\") " Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.643941 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b" (OuterVolumeSpecName: "kube-api-access-h724b") pod "321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" (UID: "321f8f5b-5b29-449e-a7f0-da1fa4beb1f1"). InnerVolumeSpecName "kube-api-access-h724b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:56:37 crc kubenswrapper[4869]: I0929 13:56:37.732784 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h724b\" (UniqueName: \"kubernetes.io/projected/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1-kube-api-access-h724b\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.219148 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rpv4c" event={"ID":"fe014b15-35d6-4091-aece-db0abc23b4a9","Type":"ContainerStarted","Data":"217317868a84e492e8453747ec8802a3cb1c7d80331146dcc5930f78d34bec5e"} Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.222095 4869 generic.go:334] "Generic (PLEG): container finished" podID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" containerID="c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca" exitCode=0 Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.222170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nbpn4" event={"ID":"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1","Type":"ContainerDied","Data":"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca"} Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.222250 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nbpn4" event={"ID":"321f8f5b-5b29-449e-a7f0-da1fa4beb1f1","Type":"ContainerDied","Data":"dece7db0605f519c96b6aeb35092f32864402985f97630455a28ba5d9f5ac6ef"} Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.222285 4869 scope.go:117] "RemoveContainer" containerID="c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.222684 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nbpn4" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.244438 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-rpv4c" podStartSLOduration=2.181184229 podStartE2EDuration="2.244421096s" podCreationTimestamp="2025-09-29 13:56:36 +0000 UTC" firstStartedPulling="2025-09-29 13:56:36.969794008 +0000 UTC m=+923.410438328" lastFinishedPulling="2025-09-29 13:56:37.033030875 +0000 UTC m=+923.473675195" observedRunningTime="2025-09-29 13:56:38.241083966 +0000 UTC m=+924.681728286" watchObservedRunningTime="2025-09-29 13:56:38.244421096 +0000 UTC m=+924.685065416" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.249830 4869 scope.go:117] "RemoveContainer" containerID="c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca" Sep 29 13:56:38 crc kubenswrapper[4869]: E0929 13:56:38.251166 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca\": container with ID starting with c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca not found: ID does not exist" containerID="c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.251223 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca"} err="failed to get container status \"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca\": rpc error: code = NotFound desc = could not find container \"c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca\": container with ID starting with c83f3637c21df2ed1f972b6fa4aa2e2057226b0b186a343787acd75366f5fbca not found: ID does not exist" Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.268318 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:38 crc kubenswrapper[4869]: I0929 13:56:38.272411 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-nbpn4"] Sep 29 13:56:40 crc kubenswrapper[4869]: I0929 13:56:40.253327 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" path="/var/lib/kubelet/pods/321f8f5b-5b29-449e-a7f0-da1fa4beb1f1/volumes" Sep 29 13:56:46 crc kubenswrapper[4869]: I0929 13:56:46.506438 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:46 crc kubenswrapper[4869]: I0929 13:56:46.506937 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:46 crc kubenswrapper[4869]: I0929 13:56:46.545182 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:47 crc kubenswrapper[4869]: I0929 13:56:47.330113 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-rpv4c" Sep 29 13:56:50 crc kubenswrapper[4869]: I0929 13:56:50.657088 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:56:50 crc kubenswrapper[4869]: I0929 13:56:50.657660 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.405812 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9"] Sep 29 13:56:54 crc kubenswrapper[4869]: E0929 13:56:54.406787 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" containerName="registry-server" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.406807 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" containerName="registry-server" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.406988 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="321f8f5b-5b29-449e-a7f0-da1fa4beb1f1" containerName="registry-server" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.408666 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.413161 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-pgdmw" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.413477 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9"] Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.593265 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.593574 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.593710 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvhbd\" (UniqueName: \"kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.695344 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.695475 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.695508 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvhbd\" (UniqueName: \"kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.696094 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.697553 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.724792 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvhbd\" (UniqueName: \"kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd\") pod \"095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:54 crc kubenswrapper[4869]: I0929 13:56:54.733832 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:55 crc kubenswrapper[4869]: I0929 13:56:55.151682 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9"] Sep 29 13:56:55 crc kubenswrapper[4869]: I0929 13:56:55.355448 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerStarted","Data":"18892a06430fe7046bb9b6127ea5f18f885f548fa971e12da0a34c20b3861977"} Sep 29 13:56:55 crc kubenswrapper[4869]: I0929 13:56:55.355513 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerStarted","Data":"276dcf57b57f2883d9337ed48134d78a0fd1d07e7552fb3a224edc5be6f94e5d"} Sep 29 13:56:56 crc kubenswrapper[4869]: I0929 13:56:56.366455 4869 generic.go:334] "Generic (PLEG): container finished" podID="4646821b-b147-4416-bb71-1d722d94a87a" containerID="18892a06430fe7046bb9b6127ea5f18f885f548fa971e12da0a34c20b3861977" exitCode=0 Sep 29 13:56:56 crc kubenswrapper[4869]: I0929 13:56:56.366517 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerDied","Data":"18892a06430fe7046bb9b6127ea5f18f885f548fa971e12da0a34c20b3861977"} Sep 29 13:56:57 crc kubenswrapper[4869]: I0929 13:56:57.379526 4869 generic.go:334] "Generic (PLEG): container finished" podID="4646821b-b147-4416-bb71-1d722d94a87a" containerID="38288f08cb2e56df2088ffaaf01509bb13b2801b6814a5f83a719e21e051f88b" exitCode=0 Sep 29 13:56:57 crc kubenswrapper[4869]: I0929 13:56:57.380145 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerDied","Data":"38288f08cb2e56df2088ffaaf01509bb13b2801b6814a5f83a719e21e051f88b"} Sep 29 13:56:58 crc kubenswrapper[4869]: I0929 13:56:58.391685 4869 generic.go:334] "Generic (PLEG): container finished" podID="4646821b-b147-4416-bb71-1d722d94a87a" containerID="47d8752a6a318cb2172c3eedeca86d13ec5208e02eb3624290f79c3065184217" exitCode=0 Sep 29 13:56:58 crc kubenswrapper[4869]: I0929 13:56:58.391769 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerDied","Data":"47d8752a6a318cb2172c3eedeca86d13ec5208e02eb3624290f79c3065184217"} Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.701045 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.879380 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util\") pod \"4646821b-b147-4416-bb71-1d722d94a87a\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.879602 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle\") pod \"4646821b-b147-4416-bb71-1d722d94a87a\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.879707 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvhbd\" (UniqueName: \"kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd\") pod \"4646821b-b147-4416-bb71-1d722d94a87a\" (UID: \"4646821b-b147-4416-bb71-1d722d94a87a\") " Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.881677 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle" (OuterVolumeSpecName: "bundle") pod "4646821b-b147-4416-bb71-1d722d94a87a" (UID: "4646821b-b147-4416-bb71-1d722d94a87a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.889819 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd" (OuterVolumeSpecName: "kube-api-access-mvhbd") pod "4646821b-b147-4416-bb71-1d722d94a87a" (UID: "4646821b-b147-4416-bb71-1d722d94a87a"). InnerVolumeSpecName "kube-api-access-mvhbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.894870 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util" (OuterVolumeSpecName: "util") pod "4646821b-b147-4416-bb71-1d722d94a87a" (UID: "4646821b-b147-4416-bb71-1d722d94a87a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.981231 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvhbd\" (UniqueName: \"kubernetes.io/projected/4646821b-b147-4416-bb71-1d722d94a87a-kube-api-access-mvhbd\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.981273 4869 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-util\") on node \"crc\" DevicePath \"\"" Sep 29 13:56:59 crc kubenswrapper[4869]: I0929 13:56:59.981283 4869 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4646821b-b147-4416-bb71-1d722d94a87a-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 13:57:00 crc kubenswrapper[4869]: I0929 13:57:00.416824 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" event={"ID":"4646821b-b147-4416-bb71-1d722d94a87a","Type":"ContainerDied","Data":"276dcf57b57f2883d9337ed48134d78a0fd1d07e7552fb3a224edc5be6f94e5d"} Sep 29 13:57:00 crc kubenswrapper[4869]: I0929 13:57:00.416896 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="276dcf57b57f2883d9337ed48134d78a0fd1d07e7552fb3a224edc5be6f94e5d" Sep 29 13:57:00 crc kubenswrapper[4869]: I0929 13:57:00.416939 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.157127 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr"] Sep 29 13:57:08 crc kubenswrapper[4869]: E0929 13:57:08.158305 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="pull" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.158322 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="pull" Sep 29 13:57:08 crc kubenswrapper[4869]: E0929 13:57:08.158345 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="extract" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.158353 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="extract" Sep 29 13:57:08 crc kubenswrapper[4869]: E0929 13:57:08.158368 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="util" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.158376 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="util" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.158507 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4646821b-b147-4416-bb71-1d722d94a87a" containerName="extract" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.159279 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.182969 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-gsbwj" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.201601 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr"] Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.315277 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt7mf\" (UniqueName: \"kubernetes.io/projected/177c83a1-b737-4c5e-b046-260e28bb6e4e-kube-api-access-nt7mf\") pod \"openstack-operator-controller-operator-7d6679759-s65sr\" (UID: \"177c83a1-b737-4c5e-b046-260e28bb6e4e\") " pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.417456 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt7mf\" (UniqueName: \"kubernetes.io/projected/177c83a1-b737-4c5e-b046-260e28bb6e4e-kube-api-access-nt7mf\") pod \"openstack-operator-controller-operator-7d6679759-s65sr\" (UID: \"177c83a1-b737-4c5e-b046-260e28bb6e4e\") " pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.441548 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt7mf\" (UniqueName: \"kubernetes.io/projected/177c83a1-b737-4c5e-b046-260e28bb6e4e-kube-api-access-nt7mf\") pod \"openstack-operator-controller-operator-7d6679759-s65sr\" (UID: \"177c83a1-b737-4c5e-b046-260e28bb6e4e\") " pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.481200 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:08 crc kubenswrapper[4869]: I0929 13:57:08.770597 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr"] Sep 29 13:57:09 crc kubenswrapper[4869]: I0929 13:57:09.521069 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" event={"ID":"177c83a1-b737-4c5e-b046-260e28bb6e4e","Type":"ContainerStarted","Data":"4e9193c7b3a6d40e8eb002cb78871f8218b76d635561399a3d7f4b1a8b4d7b1d"} Sep 29 13:57:14 crc kubenswrapper[4869]: I0929 13:57:14.566426 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" event={"ID":"177c83a1-b737-4c5e-b046-260e28bb6e4e","Type":"ContainerStarted","Data":"7e44fb0e28b2d42865b212371f74b458ddfd90a76cf6d6961980cd4ec7058ae7"} Sep 29 13:57:16 crc kubenswrapper[4869]: I0929 13:57:16.590955 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" event={"ID":"177c83a1-b737-4c5e-b046-260e28bb6e4e","Type":"ContainerStarted","Data":"e7e9ff9c0fb87da76faead6ac7b79bf5c28f689f7272af4df1c6a279b2bc9a05"} Sep 29 13:57:16 crc kubenswrapper[4869]: I0929 13:57:16.591540 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:16 crc kubenswrapper[4869]: I0929 13:57:16.629760 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" podStartSLOduration=1.500109148 podStartE2EDuration="8.629735001s" podCreationTimestamp="2025-09-29 13:57:08 +0000 UTC" firstStartedPulling="2025-09-29 13:57:08.78583099 +0000 UTC m=+955.226475310" lastFinishedPulling="2025-09-29 13:57:15.915456843 +0000 UTC m=+962.356101163" observedRunningTime="2025-09-29 13:57:16.623678477 +0000 UTC m=+963.064322807" watchObservedRunningTime="2025-09-29 13:57:16.629735001 +0000 UTC m=+963.070379331" Sep 29 13:57:18 crc kubenswrapper[4869]: I0929 13:57:18.509718 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7d6679759-s65sr" Sep 29 13:57:20 crc kubenswrapper[4869]: I0929 13:57:20.657730 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:57:20 crc kubenswrapper[4869]: I0929 13:57:20.658337 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:57:20 crc kubenswrapper[4869]: I0929 13:57:20.658412 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 13:57:20 crc kubenswrapper[4869]: I0929 13:57:20.659521 4869 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 13:57:20 crc kubenswrapper[4869]: I0929 13:57:20.659642 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a" gracePeriod=600 Sep 29 13:57:21 crc kubenswrapper[4869]: I0929 13:57:21.628833 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a" exitCode=0 Sep 29 13:57:21 crc kubenswrapper[4869]: I0929 13:57:21.628882 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a"} Sep 29 13:57:21 crc kubenswrapper[4869]: I0929 13:57:21.629308 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60"} Sep 29 13:57:21 crc kubenswrapper[4869]: I0929 13:57:21.629338 4869 scope.go:117] "RemoveContainer" containerID="044bc1bbf05f7114209acb0aca5c7026aa69010563b4d0217b04eb424198b8d6" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.709800 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.711843 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.719848 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.720999 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.726846 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.727321 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-7bsr8" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.727663 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-6nthd" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.727979 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.729753 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-mvl9t" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.749057 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.760595 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.761825 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.776839 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-skmfk" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.813451 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.824330 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-8ff95898-h254r"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.830143 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.830310 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.830743 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.836593 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-nvbzk" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.844747 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-8ff95898-h254r"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.858320 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-695847bc78-578dq"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.859659 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.864704 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vssnn\" (UniqueName: \"kubernetes.io/projected/d7f3dd6f-b5bd-482e-a41a-e426459a8bfd-kube-api-access-vssnn\") pod \"glance-operator-controller-manager-67b5d44b7f-57g9g\" (UID: \"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd\") " pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.864758 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nfbm\" (UniqueName: \"kubernetes.io/projected/f719d8af-e87a-4f02-bd10-fe1a2899b71d-kube-api-access-2nfbm\") pod \"cinder-operator-controller-manager-748c574d75-shlvx\" (UID: \"f719d8af-e87a-4f02-bd10-fe1a2899b71d\") " pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.864780 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk689\" (UniqueName: \"kubernetes.io/projected/e2386607-ebad-4616-9e23-d81e2c64350c-kube-api-access-xk689\") pod \"designate-operator-controller-manager-7d74f4d695-xxmd5\" (UID: \"e2386607-ebad-4616-9e23-d81e2c64350c\") " pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.864835 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkxhs\" (UniqueName: \"kubernetes.io/projected/6ed6de20-a7c3-40be-bc68-ff4f978dba14-kube-api-access-tkxhs\") pod \"barbican-operator-controller-manager-6495d75b5-qq2wr\" (UID: \"6ed6de20-a7c3-40be-bc68-ff4f978dba14\") " pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.865955 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-mc826" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.871919 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-695847bc78-578dq"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.877896 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.879410 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.889559 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.892360 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.892711 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-bgkrz" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.893806 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.895027 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.902744 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-w9rxc" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.913575 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.924914 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.927044 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.936572 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-6x69m" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.945067 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz"] Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.946550 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.953214 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-8pdjz" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965746 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vssnn\" (UniqueName: \"kubernetes.io/projected/d7f3dd6f-b5bd-482e-a41a-e426459a8bfd-kube-api-access-vssnn\") pod \"glance-operator-controller-manager-67b5d44b7f-57g9g\" (UID: \"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd\") " pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965803 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8767r\" (UniqueName: \"kubernetes.io/projected/43f73602-1bf6-4550-bad6-ce9cedaa6955-kube-api-access-8767r\") pod \"heat-operator-controller-manager-8ff95898-h254r\" (UID: \"43f73602-1bf6-4550-bad6-ce9cedaa6955\") " pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965838 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nfbm\" (UniqueName: \"kubernetes.io/projected/f719d8af-e87a-4f02-bd10-fe1a2899b71d-kube-api-access-2nfbm\") pod \"cinder-operator-controller-manager-748c574d75-shlvx\" (UID: \"f719d8af-e87a-4f02-bd10-fe1a2899b71d\") " pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965862 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk689\" (UniqueName: \"kubernetes.io/projected/e2386607-ebad-4616-9e23-d81e2c64350c-kube-api-access-xk689\") pod \"designate-operator-controller-manager-7d74f4d695-xxmd5\" (UID: \"e2386607-ebad-4616-9e23-d81e2c64350c\") " pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965911 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4h9p\" (UniqueName: \"kubernetes.io/projected/17b6d42c-924b-48c9-9a78-13cb3a8d7776-kube-api-access-p4h9p\") pod \"horizon-operator-controller-manager-695847bc78-578dq\" (UID: \"17b6d42c-924b-48c9-9a78-13cb3a8d7776\") " pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:57:51 crc kubenswrapper[4869]: I0929 13:57:51.965932 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkxhs\" (UniqueName: \"kubernetes.io/projected/6ed6de20-a7c3-40be-bc68-ff4f978dba14-kube-api-access-tkxhs\") pod \"barbican-operator-controller-manager-6495d75b5-qq2wr\" (UID: \"6ed6de20-a7c3-40be-bc68-ff4f978dba14\") " pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.015691 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.030571 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nfbm\" (UniqueName: 
\"kubernetes.io/projected/f719d8af-e87a-4f02-bd10-fe1a2899b71d-kube-api-access-2nfbm\") pod \"cinder-operator-controller-manager-748c574d75-shlvx\" (UID: \"f719d8af-e87a-4f02-bd10-fe1a2899b71d\") " pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.037317 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkxhs\" (UniqueName: \"kubernetes.io/projected/6ed6de20-a7c3-40be-bc68-ff4f978dba14-kube-api-access-tkxhs\") pod \"barbican-operator-controller-manager-6495d75b5-qq2wr\" (UID: \"6ed6de20-a7c3-40be-bc68-ff4f978dba14\") " pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.043657 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.047961 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vssnn\" (UniqueName: \"kubernetes.io/projected/d7f3dd6f-b5bd-482e-a41a-e426459a8bfd-kube-api-access-vssnn\") pod \"glance-operator-controller-manager-67b5d44b7f-57g9g\" (UID: \"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd\") " pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.055442 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.067503 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk689\" (UniqueName: \"kubernetes.io/projected/e2386607-ebad-4616-9e23-d81e2c64350c-kube-api-access-xk689\") pod \"designate-operator-controller-manager-7d74f4d695-xxmd5\" (UID: \"e2386607-ebad-4616-9e23-d81e2c64350c\") " pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069441 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx7pf\" (UniqueName: \"kubernetes.io/projected/7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0-kube-api-access-hx7pf\") pod \"manila-operator-controller-manager-56cf9c6b99-2lgbz\" (UID: \"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0\") " pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069486 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4h9p\" (UniqueName: \"kubernetes.io/projected/17b6d42c-924b-48c9-9a78-13cb3a8d7776-kube-api-access-p4h9p\") pod \"horizon-operator-controller-manager-695847bc78-578dq\" (UID: \"17b6d42c-924b-48c9-9a78-13cb3a8d7776\") " pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069510 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069547 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrl6k\" (UniqueName: \"kubernetes.io/projected/bfc73681-1e32-4e79-818f-944b609ef92b-kube-api-access-wrl6k\") pod \"ironic-operator-controller-manager-9fc8d5567-lsch2\" (UID: \"bfc73681-1e32-4e79-818f-944b609ef92b\") " pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069619 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8767r\" (UniqueName: \"kubernetes.io/projected/43f73602-1bf6-4550-bad6-ce9cedaa6955-kube-api-access-8767r\") pod \"heat-operator-controller-manager-8ff95898-h254r\" (UID: \"43f73602-1bf6-4550-bad6-ce9cedaa6955\") " pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069658 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb4cc\" (UniqueName: \"kubernetes.io/projected/c13c38cf-c074-4e90-a79f-58ad4a24db6e-kube-api-access-lb4cc\") pod \"keystone-operator-controller-manager-7bf498966c-z9j4w\" (UID: \"c13c38cf-c074-4e90-a79f-58ad4a24db6e\") " pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.069694 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp42b\" (UniqueName: \"kubernetes.io/projected/b76e3341-e4f0-4711-95cc-874919666585-kube-api-access-kp42b\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.079347 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.139342 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4h9p\" (UniqueName: \"kubernetes.io/projected/17b6d42c-924b-48c9-9a78-13cb3a8d7776-kube-api-access-p4h9p\") pod \"horizon-operator-controller-manager-695847bc78-578dq\" (UID: \"17b6d42c-924b-48c9-9a78-13cb3a8d7776\") " pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.141526 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.142348 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8767r\" (UniqueName: \"kubernetes.io/projected/43f73602-1bf6-4550-bad6-ce9cedaa6955-kube-api-access-8767r\") pod \"heat-operator-controller-manager-8ff95898-h254r\" (UID: \"43f73602-1bf6-4550-bad6-ce9cedaa6955\") " pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.143532 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.162600 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.166759 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.168038 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.173079 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-l6lnk" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.174296 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx7pf\" (UniqueName: \"kubernetes.io/projected/7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0-kube-api-access-hx7pf\") pod \"manila-operator-controller-manager-56cf9c6b99-2lgbz\" (UID: \"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0\") " pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.174318 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.174343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrl6k\" (UniqueName: \"kubernetes.io/projected/bfc73681-1e32-4e79-818f-944b609ef92b-kube-api-access-wrl6k\") pod \"ironic-operator-controller-manager-9fc8d5567-lsch2\" (UID: \"bfc73681-1e32-4e79-818f-944b609ef92b\") " pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.174388 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb4cc\" (UniqueName: \"kubernetes.io/projected/c13c38cf-c074-4e90-a79f-58ad4a24db6e-kube-api-access-lb4cc\") pod \"keystone-operator-controller-manager-7bf498966c-z9j4w\" (UID: \"c13c38cf-c074-4e90-a79f-58ad4a24db6e\") " pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.174415 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp42b\" (UniqueName: \"kubernetes.io/projected/b76e3341-e4f0-4711-95cc-874919666585-kube-api-access-kp42b\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.176743 4869 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.176890 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert podName:b76e3341-e4f0-4711-95cc-874919666585 nodeName:}" failed. No retries permitted until 2025-09-29 13:57:52.676865004 +0000 UTC m=+999.117509324 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert") pod "infra-operator-controller-manager-858cd69f49-lxvwh" (UID: "b76e3341-e4f0-4711-95cc-874919666585") : secret "infra-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.181639 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.185304 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.190252 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-qfwj5" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.201199 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.219679 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.232970 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx7pf\" (UniqueName: \"kubernetes.io/projected/7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0-kube-api-access-hx7pf\") pod \"manila-operator-controller-manager-56cf9c6b99-2lgbz\" (UID: \"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0\") " pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.234938 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb4cc\" (UniqueName: \"kubernetes.io/projected/c13c38cf-c074-4e90-a79f-58ad4a24db6e-kube-api-access-lb4cc\") pod \"keystone-operator-controller-manager-7bf498966c-z9j4w\" (UID: \"c13c38cf-c074-4e90-a79f-58ad4a24db6e\") " pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.236341 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrl6k\" (UniqueName: \"kubernetes.io/projected/bfc73681-1e32-4e79-818f-944b609ef92b-kube-api-access-wrl6k\") pod \"ironic-operator-controller-manager-9fc8d5567-lsch2\" (UID: \"bfc73681-1e32-4e79-818f-944b609ef92b\") " pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.239285 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp42b\" (UniqueName: \"kubernetes.io/projected/b76e3341-e4f0-4711-95cc-874919666585-kube-api-access-kp42b\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.275436 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.277213 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.277237 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.278391 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.280762 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qd7g\" (UniqueName: \"kubernetes.io/projected/8667518f-048c-48b3-b838-3ff38cbc76b7-kube-api-access-6qd7g\") pod \"mariadb-operator-controller-manager-687b9cf756-l57c7\" (UID: \"8667518f-048c-48b3-b838-3ff38cbc76b7\") " pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.280830 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdzzp\" (UniqueName: \"kubernetes.io/projected/6f08d81a-137d-43b2-8c78-8227d4cd848c-kube-api-access-hdzzp\") pod \"neutron-operator-controller-manager-54d766c9f9-ggvvf\" (UID: \"6f08d81a-137d-43b2-8c78-8227d4cd848c\") " pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.285623 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.286665 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.287577 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.287714 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.307748 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-2jj5j" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.307957 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-pmmhg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.309229 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.315751 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.324016 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.326693 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.328069 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.330884 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.330905 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.359416 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d2xbg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.359670 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.360084 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-52zk5" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.360194 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.365901 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.389239 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.389735 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxgsg\" (UniqueName: \"kubernetes.io/projected/2a0fb7a7-5469-4513-ac6f-3ce8f28b9310-kube-api-access-zxgsg\") pod \"nova-operator-controller-manager-c7c776c96-v9wbw\" (UID: \"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.389795 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qd7g\" (UniqueName: \"kubernetes.io/projected/8667518f-048c-48b3-b838-3ff38cbc76b7-kube-api-access-6qd7g\") pod \"mariadb-operator-controller-manager-687b9cf756-l57c7\" (UID: \"8667518f-048c-48b3-b838-3ff38cbc76b7\") " pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.389830 4869 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v2lx\" (UniqueName: \"kubernetes.io/projected/ad5406a4-1938-49ee-87ef-cae347abba83-kube-api-access-7v2lx\") pod \"octavia-operator-controller-manager-76fcc6dc7c-dlvlw\" (UID: \"ad5406a4-1938-49ee-87ef-cae347abba83\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.389876 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdzzp\" (UniqueName: \"kubernetes.io/projected/6f08d81a-137d-43b2-8c78-8227d4cd848c-kube-api-access-hdzzp\") pod \"neutron-operator-controller-manager-54d766c9f9-ggvvf\" (UID: \"6f08d81a-137d-43b2-8c78-8227d4cd848c\") " pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.390993 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.391292 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.414566 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-g7m5v" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.414824 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-m62vm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.414918 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.420781 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.422315 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.455144 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-9kkrt" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.460190 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.462612 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qd7g\" (UniqueName: \"kubernetes.io/projected/8667518f-048c-48b3-b838-3ff38cbc76b7-kube-api-access-6qd7g\") pod \"mariadb-operator-controller-manager-687b9cf756-l57c7\" (UID: \"8667518f-048c-48b3-b838-3ff38cbc76b7\") " pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.471537 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdzzp\" (UniqueName: \"kubernetes.io/projected/6f08d81a-137d-43b2-8c78-8227d4cd848c-kube-api-access-hdzzp\") pod \"neutron-operator-controller-manager-54d766c9f9-ggvvf\" (UID: \"6f08d81a-137d-43b2-8c78-8227d4cd848c\") " pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495721 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxgsg\" (UniqueName: \"kubernetes.io/projected/2a0fb7a7-5469-4513-ac6f-3ce8f28b9310-kube-api-access-zxgsg\") pod \"nova-operator-controller-manager-c7c776c96-v9wbw\" (UID: \"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495780 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v2lx\" (UniqueName: \"kubernetes.io/projected/ad5406a4-1938-49ee-87ef-cae347abba83-kube-api-access-7v2lx\") pod \"octavia-operator-controller-manager-76fcc6dc7c-dlvlw\" (UID: \"ad5406a4-1938-49ee-87ef-cae347abba83\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495806 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgrth\" (UniqueName: \"kubernetes.io/projected/2716f7dc-4fa9-46a8-abcb-71016098e732-kube-api-access-vgrth\") pod \"placement-operator-controller-manager-774b97b48-8bn6g\" (UID: \"2716f7dc-4fa9-46a8-abcb-71016098e732\") " pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495829 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpnfr\" (UniqueName: \"kubernetes.io/projected/cc44f555-b327-4135-a59c-5c085be0ca2e-kube-api-access-wpnfr\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495860 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnx7l\" (UniqueName: 
\"kubernetes.io/projected/bc76592b-e1dc-41f2-8696-8edd7e3d8315-kube-api-access-vnx7l\") pod \"swift-operator-controller-manager-bc7dc7bd9-59sjm\" (UID: \"bc76592b-e1dc-41f2-8696-8edd7e3d8315\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495882 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.495923 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sm4x\" (UniqueName: \"kubernetes.io/projected/6f94106a-7f34-4265-a988-c90ac7466919-kube-api-access-9sm4x\") pod \"ovn-operator-controller-manager-5f95c46c78-5wk7g\" (UID: \"6f94106a-7f34-4265-a988-c90ac7466919\") " pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.544820 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.554129 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.554740 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.557571 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxgsg\" (UniqueName: \"kubernetes.io/projected/2a0fb7a7-5469-4513-ac6f-3ce8f28b9310-kube-api-access-zxgsg\") pod \"nova-operator-controller-manager-c7c776c96-v9wbw\" (UID: \"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310\") " pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.560375 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v2lx\" (UniqueName: \"kubernetes.io/projected/ad5406a4-1938-49ee-87ef-cae347abba83-kube-api-access-7v2lx\") pod \"octavia-operator-controller-manager-76fcc6dc7c-dlvlw\" (UID: \"ad5406a4-1938-49ee-87ef-cae347abba83\") " pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598561 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgrth\" (UniqueName: \"kubernetes.io/projected/2716f7dc-4fa9-46a8-abcb-71016098e732-kube-api-access-vgrth\") pod \"placement-operator-controller-manager-774b97b48-8bn6g\" (UID: \"2716f7dc-4fa9-46a8-abcb-71016098e732\") " pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598642 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nmbn\" (UniqueName: \"kubernetes.io/projected/436fe0ad-2010-46f2-ad86-ee243370d675-kube-api-access-8nmbn\") pod 
\"telemetry-operator-controller-manager-5bf96cfbc4-rhc9x\" (UID: \"436fe0ad-2010-46f2-ad86-ee243370d675\") " pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598674 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpnfr\" (UniqueName: \"kubernetes.io/projected/cc44f555-b327-4135-a59c-5c085be0ca2e-kube-api-access-wpnfr\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598710 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnx7l\" (UniqueName: \"kubernetes.io/projected/bc76592b-e1dc-41f2-8696-8edd7e3d8315-kube-api-access-vnx7l\") pod \"swift-operator-controller-manager-bc7dc7bd9-59sjm\" (UID: \"bc76592b-e1dc-41f2-8696-8edd7e3d8315\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598737 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.598784 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sm4x\" (UniqueName: \"kubernetes.io/projected/6f94106a-7f34-4265-a988-c90ac7466919-kube-api-access-9sm4x\") pod \"ovn-operator-controller-manager-5f95c46c78-5wk7g\" (UID: \"6f94106a-7f34-4265-a988-c90ac7466919\") " pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.600307 4869 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.600385 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert podName:cc44f555-b327-4135-a59c-5c085be0ca2e nodeName:}" failed. No retries permitted until 2025-09-29 13:57:53.100366975 +0000 UTC m=+999.541011295 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert") pod "openstack-baremetal-operator-controller-manager-6d776955-jq4lw" (UID: "cc44f555-b327-4135-a59c-5c085be0ca2e") : secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.651681 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnx7l\" (UniqueName: \"kubernetes.io/projected/bc76592b-e1dc-41f2-8696-8edd7e3d8315-kube-api-access-vnx7l\") pod \"swift-operator-controller-manager-bc7dc7bd9-59sjm\" (UID: \"bc76592b-e1dc-41f2-8696-8edd7e3d8315\") " pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.659831 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.661817 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpnfr\" (UniqueName: \"kubernetes.io/projected/cc44f555-b327-4135-a59c-5c085be0ca2e-kube-api-access-wpnfr\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.661895 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.663230 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.668445 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgrth\" (UniqueName: \"kubernetes.io/projected/2716f7dc-4fa9-46a8-abcb-71016098e732-kube-api-access-vgrth\") pod \"placement-operator-controller-manager-774b97b48-8bn6g\" (UID: \"2716f7dc-4fa9-46a8-abcb-71016098e732\") " pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.668571 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-djstf" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.668968 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sm4x\" (UniqueName: \"kubernetes.io/projected/6f94106a-7f34-4265-a988-c90ac7466919-kube-api-access-9sm4x\") pod \"ovn-operator-controller-manager-5f95c46c78-5wk7g\" (UID: \"6f94106a-7f34-4265-a988-c90ac7466919\") " pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.704359 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.704721 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.704841 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nmbn\" (UniqueName: \"kubernetes.io/projected/436fe0ad-2010-46f2-ad86-ee243370d675-kube-api-access-8nmbn\") pod \"telemetry-operator-controller-manager-5bf96cfbc4-rhc9x\" (UID: \"436fe0ad-2010-46f2-ad86-ee243370d675\") " pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.704998 4869 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: E0929 13:57:52.705100 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert podName:b76e3341-e4f0-4711-95cc-874919666585 nodeName:}" failed. No retries permitted until 2025-09-29 13:57:53.705077808 +0000 UTC m=+1000.145722128 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert") pod "infra-operator-controller-manager-858cd69f49-lxvwh" (UID: "b76e3341-e4f0-4711-95cc-874919666585") : secret "infra-operator-webhook-server-cert" not found Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.764111 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.777889 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.783583 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nmbn\" (UniqueName: \"kubernetes.io/projected/436fe0ad-2010-46f2-ad86-ee243370d675-kube-api-access-8nmbn\") pod \"telemetry-operator-controller-manager-5bf96cfbc4-rhc9x\" (UID: \"436fe0ad-2010-46f2-ad86-ee243370d675\") " pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.808917 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4xsg\" (UniqueName: \"kubernetes.io/projected/9eb7b21e-54fc-4b89-977a-6ad6481e7237-kube-api-access-k4xsg\") pod \"test-operator-controller-manager-f66b554c6-xmgtg\" (UID: \"9eb7b21e-54fc-4b89-977a-6ad6481e7237\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.809747 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.816208 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.822469 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-xclj9" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.852231 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.854492 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.876987 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.917368 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4xsg\" (UniqueName: \"kubernetes.io/projected/9eb7b21e-54fc-4b89-977a-6ad6481e7237-kube-api-access-k4xsg\") pod \"test-operator-controller-manager-f66b554c6-xmgtg\" (UID: \"9eb7b21e-54fc-4b89-977a-6ad6481e7237\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.917506 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26snl\" (UniqueName: \"kubernetes.io/projected/aa4f1345-cc28-491e-876c-a125e73b4a8b-kube-api-access-26snl\") pod \"watcher-operator-controller-manager-5f5b8d96d6-ppscx\" (UID: \"aa4f1345-cc28-491e-876c-a125e73b4a8b\") " pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.921248 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" event={"ID":"6ed6de20-a7c3-40be-bc68-ff4f978dba14","Type":"ContainerStarted","Data":"9b65d0ce0256f95ceeb5778e70ee443318c55411096e881942ffa3ea87a1f2e6"} Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.934187 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.942084 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.943373 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.946819 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-qdwgn" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.946836 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.962785 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.978048 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4xsg\" (UniqueName: \"kubernetes.io/projected/9eb7b21e-54fc-4b89-977a-6ad6481e7237-kube-api-access-k4xsg\") pod \"test-operator-controller-manager-f66b554c6-xmgtg\" (UID: \"9eb7b21e-54fc-4b89-977a-6ad6481e7237\") " pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.995382 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm"] Sep 29 13:57:52 crc kubenswrapper[4869]: I0929 13:57:52.998880 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.007554 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-6v5dj" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.007945 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.013489 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.017226 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.018528 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68d24\" (UniqueName: \"kubernetes.io/projected/a923032e-f8f0-4622-8de9-01973ec22782-kube-api-access-68d24\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.019188 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26snl\" (UniqueName: \"kubernetes.io/projected/aa4f1345-cc28-491e-876c-a125e73b4a8b-kube-api-access-26snl\") pod \"watcher-operator-controller-manager-5f5b8d96d6-ppscx\" (UID: \"aa4f1345-cc28-491e-876c-a125e73b4a8b\") " pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.019225 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a923032e-f8f0-4622-8de9-01973ec22782-cert\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.047143 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26snl\" (UniqueName: \"kubernetes.io/projected/aa4f1345-cc28-491e-876c-a125e73b4a8b-kube-api-access-26snl\") pod \"watcher-operator-controller-manager-5f5b8d96d6-ppscx\" (UID: \"aa4f1345-cc28-491e-876c-a125e73b4a8b\") " pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.109972 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.121109 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68d24\" (UniqueName: \"kubernetes.io/projected/a923032e-f8f0-4622-8de9-01973ec22782-kube-api-access-68d24\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.121556 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.121660 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv5hm\" (UniqueName: \"kubernetes.io/projected/e73fced4-cf05-4c8d-b1af-39c07ef69514-kube-api-access-pv5hm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-j4gqm\" (UID: \"e73fced4-cf05-4c8d-b1af-39c07ef69514\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.121697 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a923032e-f8f0-4622-8de9-01973ec22782-cert\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: E0929 13:57:53.122486 4869 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 29 13:57:53 crc kubenswrapper[4869]: E0929 13:57:53.122530 4869 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert podName:cc44f555-b327-4135-a59c-5c085be0ca2e nodeName:}" failed. No retries permitted until 2025-09-29 13:57:54.122516443 +0000 UTC m=+1000.563160763 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert") pod "openstack-baremetal-operator-controller-manager-6d776955-jq4lw" (UID: "cc44f555-b327-4135-a59c-5c085be0ca2e") : secret "openstack-baremetal-operator-webhook-server-cert" not found Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.146424 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a923032e-f8f0-4622-8de9-01973ec22782-cert\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.149034 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68d24\" (UniqueName: \"kubernetes.io/projected/a923032e-f8f0-4622-8de9-01973ec22782-kube-api-access-68d24\") pod \"openstack-operator-controller-manager-cdbfd4cbb-hbh55\" (UID: \"a923032e-f8f0-4622-8de9-01973ec22782\") " pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.183687 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.223486 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv5hm\" (UniqueName: \"kubernetes.io/projected/e73fced4-cf05-4c8d-b1af-39c07ef69514-kube-api-access-pv5hm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-j4gqm\" (UID: \"e73fced4-cf05-4c8d-b1af-39c07ef69514\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.254753 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv5hm\" (UniqueName: \"kubernetes.io/projected/e73fced4-cf05-4c8d-b1af-39c07ef69514-kube-api-access-pv5hm\") pod \"rabbitmq-cluster-operator-manager-79d8469568-j4gqm\" (UID: \"e73fced4-cf05-4c8d-b1af-39c07ef69514\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.277336 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.502843 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.539985 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.546438 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-695847bc78-578dq"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.570596 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5"] Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.577425 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2386607_ebad_4616_9e23_d81e2c64350c.slice/crio-065ce47b32dbcbfd30d89d11823a75888ba2dd9f66ae535f71a1aaa12b7b3374 WatchSource:0}: Error finding container 065ce47b32dbcbfd30d89d11823a75888ba2dd9f66ae535f71a1aaa12b7b3374: Status 404 returned error can't find the container with id 065ce47b32dbcbfd30d89d11823a75888ba2dd9f66ae535f71a1aaa12b7b3374 Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.579002 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz"] Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.580863 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43f73602_1bf6_4550_bad6_ce9cedaa6955.slice/crio-d7ebc610052a4347fd1ab75e7056b1a0cd2c5d977fa1fbeea9423c699d768b74 WatchSource:0}: Error finding container d7ebc610052a4347fd1ab75e7056b1a0cd2c5d977fa1fbeea9423c699d768b74: Status 404 returned error can't find the container with id d7ebc610052a4347fd1ab75e7056b1a0cd2c5d977fa1fbeea9423c699d768b74 Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.585512 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ddf27f6_9984_44ee_9d1d_3aa6e38b2af0.slice/crio-3751fec20691dfcb8534f5f0af599c28cf6f4f16b81ad57455da40bdb9aafe71 WatchSource:0}: Error finding container 3751fec20691dfcb8534f5f0af599c28cf6f4f16b81ad57455da40bdb9aafe71: Status 404 returned error can't find the container with id 3751fec20691dfcb8534f5f0af599c28cf6f4f16b81ad57455da40bdb9aafe71 Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.587569 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-8ff95898-h254r"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.732117 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.740572 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b76e3341-e4f0-4711-95cc-874919666585-cert\") pod \"infra-operator-controller-manager-858cd69f49-lxvwh\" (UID: \"b76e3341-e4f0-4711-95cc-874919666585\") " pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:53 
crc kubenswrapper[4869]: I0929 13:57:53.742380 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.752790 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g"] Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.757065 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7"] Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.759950 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8667518f_048c_48b3_b838_3ff38cbc76b7.slice/crio-2a92fae5a1618192efb37dd922352af7b6f795eabe1e3d485bfac14024ad6eb8 WatchSource:0}: Error finding container 2a92fae5a1618192efb37dd922352af7b6f795eabe1e3d485bfac14024ad6eb8: Status 404 returned error can't find the container with id 2a92fae5a1618192efb37dd922352af7b6f795eabe1e3d485bfac14024ad6eb8 Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.761015 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfc73681_1e32_4e79_818f_944b609ef92b.slice/crio-02a273220a7c80b9a432652cd302778eab2171367a315ae4f8bd0ee76f43739a WatchSource:0}: Error finding container 02a273220a7c80b9a432652cd302778eab2171367a315ae4f8bd0ee76f43739a: Status 404 returned error can't find the container with id 02a273220a7c80b9a432652cd302778eab2171367a315ae4f8bd0ee76f43739a Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.771394 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w"] Sep 29 13:57:53 crc kubenswrapper[4869]: W0929 13:57:53.779013 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc13c38cf_c074_4e90_a79f_58ad4a24db6e.slice/crio-cc26747216ed307e72cac9c9f23c70156caad4b8c8cb9f5c92e8b87c98b8a683 WatchSource:0}: Error finding container cc26747216ed307e72cac9c9f23c70156caad4b8c8cb9f5c92e8b87c98b8a683: Status 404 returned error can't find the container with id cc26747216ed307e72cac9c9f23c70156caad4b8c8cb9f5c92e8b87c98b8a683 Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.931826 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" event={"ID":"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd","Type":"ContainerStarted","Data":"bb0e2699af15c84ed81873dcc363b56e1627eb4e8751ccb7b8a0accb3c548987"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.935580 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" event={"ID":"f719d8af-e87a-4f02-bd10-fe1a2899b71d","Type":"ContainerStarted","Data":"7ca1a07cbf673f1846523f1fbc4526156c0da21c02f64429e20a7618dee261e8"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.937904 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" event={"ID":"e2386607-ebad-4616-9e23-d81e2c64350c","Type":"ContainerStarted","Data":"065ce47b32dbcbfd30d89d11823a75888ba2dd9f66ae535f71a1aaa12b7b3374"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.945230 4869 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" event={"ID":"17b6d42c-924b-48c9-9a78-13cb3a8d7776","Type":"ContainerStarted","Data":"092e131a5e92064e1a43102fe9f170ee01971f1313b1071c42c5cde22b9030e9"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.948078 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" event={"ID":"8667518f-048c-48b3-b838-3ff38cbc76b7","Type":"ContainerStarted","Data":"2a92fae5a1618192efb37dd922352af7b6f795eabe1e3d485bfac14024ad6eb8"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.950878 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" event={"ID":"bfc73681-1e32-4e79-818f-944b609ef92b","Type":"ContainerStarted","Data":"02a273220a7c80b9a432652cd302778eab2171367a315ae4f8bd0ee76f43739a"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.956022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" event={"ID":"c13c38cf-c074-4e90-a79f-58ad4a24db6e","Type":"ContainerStarted","Data":"cc26747216ed307e72cac9c9f23c70156caad4b8c8cb9f5c92e8b87c98b8a683"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.957653 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" event={"ID":"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0","Type":"ContainerStarted","Data":"3751fec20691dfcb8534f5f0af599c28cf6f4f16b81ad57455da40bdb9aafe71"} Sep 29 13:57:53 crc kubenswrapper[4869]: I0929 13:57:53.958869 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" event={"ID":"43f73602-1bf6-4550-bad6-ce9cedaa6955","Type":"ContainerStarted","Data":"d7ebc610052a4347fd1ab75e7056b1a0cd2c5d977fa1fbeea9423c699d768b74"} Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.037451 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.138252 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.156179 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.156345 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.166219 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc44f555-b327-4135-a59c-5c085be0ca2e-cert\") pod \"openstack-baremetal-operator-controller-manager-6d776955-jq4lw\" (UID: \"cc44f555-b327-4135-a59c-5c085be0ca2e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.168924 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.178377 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.194957 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.207206 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x"] Sep 29 13:57:54 crc kubenswrapper[4869]: W0929 13:57:54.211635 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2716f7dc_4fa9_46a8_abcb_71016098e732.slice/crio-91cdbb5a4b523881029f8bf1bd2d3b8cf50ff1923a826d09f2811cfc113e834e WatchSource:0}: Error finding container 91cdbb5a4b523881029f8bf1bd2d3b8cf50ff1923a826d09f2811cfc113e834e: Status 404 returned error can't find the container with id 91cdbb5a4b523881029f8bf1bd2d3b8cf50ff1923a826d09f2811cfc113e834e Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.217459 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm"] Sep 29 13:57:54 crc kubenswrapper[4869]: W0929 13:57:54.249729 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa4f1345_cc28_491e_876c_a125e73b4a8b.slice/crio-cd36299dca954650a07646c44282f90dc933f165858e9229bbfec19f79f0bed2 WatchSource:0}: Error finding container cd36299dca954650a07646c44282f90dc933f165858e9229bbfec19f79f0bed2: Status 404 returned error can't find the container with id cd36299dca954650a07646c44282f90dc933f165858e9229bbfec19f79f0bed2 Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.293708 4869 kuberuntime_manager.go:1274] "Unhandled 
Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:bdf49c202aba5000737445bc4aeee6c5cdc6dd29c3dcd1394df9f8695830f9c6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hdzzp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-54d766c9f9-ggvvf_openstack-operators(6f08d81a-137d-43b2-8c78-8227d4cd848c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 29 13:57:54 crc kubenswrapper[4869]: W0929 13:57:54.295266 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc76592b_e1dc_41f2_8696_8edd7e3d8315.slice/crio-ffa03bbe3f066027cffe1d4ea0e7838877c4d2a671c4a2e5ca9e79c15cecefd6 WatchSource:0}: Error finding container ffa03bbe3f066027cffe1d4ea0e7838877c4d2a671c4a2e5ca9e79c15cecefd6: Status 404 returned error can't find the container with id ffa03bbe3f066027cffe1d4ea0e7838877c4d2a671c4a2e5ca9e79c15cecefd6 Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.296945 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:057de94f9afa340adc34f9b25f8007d9cd2ba71bc8b5d77aac522add53b7caef,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zxgsg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-c7c776c96-v9wbw_openstack-operators(2a0fb7a7-5469-4513-ac6f-3ce8f28b9310): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.299155 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.307952 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vnx7l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-bc7dc7bd9-59sjm_openstack-operators(bc76592b-e1dc-41f2-8696-8edd7e3d8315): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.312041 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf"] Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.344358 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: 
{{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7v2lx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-76fcc6dc7c-dlvlw_openstack-operators(ad5406a4-1938-49ee-87ef-cae347abba83): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.351659 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.362259 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg"] Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.371771 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw"] Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.413778 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pv5hm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-79d8469568-j4gqm_openstack-operators(e73fced4-cf05-4c8d-b1af-39c07ef69514): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.415878 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" podUID="e73fced4-cf05-4c8d-b1af-39c07ef69514" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.636848 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh"] Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.768910 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" podUID="6f08d81a-137d-43b2-8c78-8227d4cd848c" Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.866135 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" podUID="bc76592b-e1dc-41f2-8696-8edd7e3d8315" Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.870670 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" podUID="ad5406a4-1938-49ee-87ef-cae347abba83" Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.937897 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" podUID="2a0fb7a7-5469-4513-ac6f-3ce8f28b9310" Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.984378 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" event={"ID":"bc76592b-e1dc-41f2-8696-8edd7e3d8315","Type":"ContainerStarted","Data":"6bf9d6dbe6a4555009c539cf1f01cad42cbb3b79fd6474d2727940f55e5ce714"} Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.984443 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" 
event={"ID":"bc76592b-e1dc-41f2-8696-8edd7e3d8315","Type":"ContainerStarted","Data":"ffa03bbe3f066027cffe1d4ea0e7838877c4d2a671c4a2e5ca9e79c15cecefd6"} Sep 29 13:57:54 crc kubenswrapper[4869]: I0929 13:57:54.986544 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" event={"ID":"436fe0ad-2010-46f2-ad86-ee243370d675","Type":"ContainerStarted","Data":"27e28506f02d5828a3d86815f538749f48b5ac41aaa3a41703e38a3ed8d4f491"} Sep 29 13:57:54 crc kubenswrapper[4869]: E0929 13:57:54.986732 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c\\\"\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" podUID="bc76592b-e1dc-41f2-8696-8edd7e3d8315" Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.027116 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" event={"ID":"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310","Type":"ContainerStarted","Data":"153856def79a90ae3690986750bba204deea9bc0fd305db74fbdc0161b79c2c9"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.027170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" event={"ID":"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310","Type":"ContainerStarted","Data":"28847f374a07df729a962e183eae0dc055429283273912193331d6e290253022"} Sep 29 13:57:55 crc kubenswrapper[4869]: E0929 13:57:55.032152 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:057de94f9afa340adc34f9b25f8007d9cd2ba71bc8b5d77aac522add53b7caef\\\"\"" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" podUID="2a0fb7a7-5469-4513-ac6f-3ce8f28b9310" Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.033544 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" event={"ID":"e73fced4-cf05-4c8d-b1af-39c07ef69514","Type":"ContainerStarted","Data":"d31653200170a51c7341334ae543667be95e580bef0245be51c9fa15fe8765aa"} Sep 29 13:57:55 crc kubenswrapper[4869]: E0929 13:57:55.035077 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" podUID="e73fced4-cf05-4c8d-b1af-39c07ef69514" Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.044858 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" event={"ID":"ad5406a4-1938-49ee-87ef-cae347abba83","Type":"ContainerStarted","Data":"b7e1bdc9446c6f6af6322a6001b1432d0929d15900c5b79810f1a94a764e103c"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.044918 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" 
event={"ID":"ad5406a4-1938-49ee-87ef-cae347abba83","Type":"ContainerStarted","Data":"3a05a5d4b2bea85a335ff41cc4b12756bcf2bac83195cecae5ffd23895f47bf2"} Sep 29 13:57:55 crc kubenswrapper[4869]: E0929 13:57:55.060704 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" podUID="ad5406a4-1938-49ee-87ef-cae347abba83" Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.068837 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" event={"ID":"b76e3341-e4f0-4711-95cc-874919666585","Type":"ContainerStarted","Data":"826dfbbe192be908e9575f9a82f9f9e5ef2fd296ff8adbfc2550e8f7351fdb0b"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.080441 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" event={"ID":"6f08d81a-137d-43b2-8c78-8227d4cd848c","Type":"ContainerStarted","Data":"9f6e81176a04962b3d1d0c9d3dea419f339c302c2bf6f793758ae23f2e9f16a0"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.080496 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" event={"ID":"6f08d81a-137d-43b2-8c78-8227d4cd848c","Type":"ContainerStarted","Data":"7772f450d37f5995d4b67baefbe660a4ac442f0bb1fe685d200e7a476e144859"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.081826 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" event={"ID":"9eb7b21e-54fc-4b89-977a-6ad6481e7237","Type":"ContainerStarted","Data":"5574dafdfe03db60618f3d4c8c146ea45b7a57b39682aa30c78d3adb8255941b"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.083312 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" event={"ID":"aa4f1345-cc28-491e-876c-a125e73b4a8b","Type":"ContainerStarted","Data":"cd36299dca954650a07646c44282f90dc933f165858e9229bbfec19f79f0bed2"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.086368 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" event={"ID":"6f94106a-7f34-4265-a988-c90ac7466919","Type":"ContainerStarted","Data":"b891d2771be6379681065b835487a673eee2e677708c2ac671598e3ca419c9cc"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.090843 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" event={"ID":"2716f7dc-4fa9-46a8-abcb-71016098e732","Type":"ContainerStarted","Data":"91cdbb5a4b523881029f8bf1bd2d3b8cf50ff1923a826d09f2811cfc113e834e"} Sep 29 13:57:55 crc kubenswrapper[4869]: E0929 13:57:55.101407 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bdf49c202aba5000737445bc4aeee6c5cdc6dd29c3dcd1394df9f8695830f9c6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" podUID="6f08d81a-137d-43b2-8c78-8227d4cd848c" Sep 29 13:57:55 crc 
kubenswrapper[4869]: I0929 13:57:55.106190 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" event={"ID":"a923032e-f8f0-4622-8de9-01973ec22782","Type":"ContainerStarted","Data":"0d723ef8a5f77e11f431a40757eb93f7f219f056ea36932e353a440b634ba725"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.106462 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" event={"ID":"a923032e-f8f0-4622-8de9-01973ec22782","Type":"ContainerStarted","Data":"80e9734fa91976c969ed95b35e9f09af9f19f87090071f66f12b0c589db0b821"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.106478 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" event={"ID":"a923032e-f8f0-4622-8de9-01973ec22782","Type":"ContainerStarted","Data":"53b406af03afda6826f92784e21e1ab01c8c59ca990e26110ac21ecafaa82846"} Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.107142 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.160581 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw"] Sep 29 13:57:55 crc kubenswrapper[4869]: I0929 13:57:55.237126 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" podStartSLOduration=3.237101565 podStartE2EDuration="3.237101565s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:57:55.227209939 +0000 UTC m=+1001.667854259" watchObservedRunningTime="2025-09-29 13:57:55.237101565 +0000 UTC m=+1001.677745885" Sep 29 13:57:55 crc kubenswrapper[4869]: W0929 13:57:55.263263 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc44f555_b327_4135_a59c_5c085be0ca2e.slice/crio-d206e044199821acbed1f2dd7af29b39b31a9eee62f47e42b568b0a3210c24f6 WatchSource:0}: Error finding container d206e044199821acbed1f2dd7af29b39b31a9eee62f47e42b568b0a3210c24f6: Status 404 returned error can't find the container with id d206e044199821acbed1f2dd7af29b39b31a9eee62f47e42b568b0a3210c24f6 Sep 29 13:57:56 crc kubenswrapper[4869]: I0929 13:57:56.136960 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" event={"ID":"cc44f555-b327-4135-a59c-5c085be0ca2e","Type":"ContainerStarted","Data":"d206e044199821acbed1f2dd7af29b39b31a9eee62f47e42b568b0a3210c24f6"} Sep 29 13:57:56 crc kubenswrapper[4869]: E0929 13:57:56.141176 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3c6f7d737e0196ec302f44354228d783ad3b210a75703dda3b39c15c01a67e8c\\\"\"" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" podUID="bc76592b-e1dc-41f2-8696-8edd7e3d8315" Sep 29 13:57:56 crc kubenswrapper[4869]: E0929 13:57:56.141323 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:4d08afd31dc5ded10c54a5541f514ac351e9b40a183285b3db27d0757a6354c8\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" podUID="ad5406a4-1938-49ee-87ef-cae347abba83" Sep 29 13:57:56 crc kubenswrapper[4869]: E0929 13:57:56.141397 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bdf49c202aba5000737445bc4aeee6c5cdc6dd29c3dcd1394df9f8695830f9c6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" podUID="6f08d81a-137d-43b2-8c78-8227d4cd848c" Sep 29 13:57:56 crc kubenswrapper[4869]: E0929 13:57:56.142492 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:057de94f9afa340adc34f9b25f8007d9cd2ba71bc8b5d77aac522add53b7caef\\\"\"" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" podUID="2a0fb7a7-5469-4513-ac6f-3ce8f28b9310" Sep 29 13:57:56 crc kubenswrapper[4869]: E0929 13:57:56.144315 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:225524223bf2a7f3a4ce95958fc9ca6fdab02745fb70374e8ff5bf1ddaceda4b\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" podUID="e73fced4-cf05-4c8d-b1af-39c07ef69514" Sep 29 13:58:03 crc kubenswrapper[4869]: I0929 13:58:03.193206 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-cdbfd4cbb-hbh55" Sep 29 13:58:07 crc kubenswrapper[4869]: E0929 13:58:07.295804 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:b98ec0b50404626e0440bcf2e22f8d7ff06d1b1bd99f01830bceb8a2b27aa094" Sep 29 13:58:07 crc kubenswrapper[4869]: E0929 13:58:07.296071 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:b98ec0b50404626e0440bcf2e22f8d7ff06d1b1bd99f01830bceb8a2b27aa094,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wrl6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-9fc8d5567-lsch2_openstack-operators(bfc73681-1e32-4e79-818f-944b609ef92b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:58:07 crc kubenswrapper[4869]: E0929 13:58:07.743704 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:f3d8f19fdacecd967319b843a048e3be334bc69f486bd64b56238e90e5ce461a" Sep 29 13:58:07 crc kubenswrapper[4869]: E0929 13:58:07.744017 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:f3d8f19fdacecd967319b843a048e3be334bc69f486bd64b56238e90e5ce461a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xk689,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-7d74f4d695-xxmd5_openstack-operators(e2386607-ebad-4616-9e23-d81e2c64350c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:58:13 crc kubenswrapper[4869]: E0929 13:58:13.018521 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:87a522d480797f54499bcd1c4a48837e1b17c33d4cc43e99ed7a53b8cedb17c7" Sep 29 13:58:13 crc kubenswrapper[4869]: E0929 13:58:13.019795 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:87a522d480797f54499bcd1c4a48837e1b17c33d4cc43e99ed7a53b8cedb17c7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kp42b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-858cd69f49-lxvwh_openstack-operators(b76e3341-e4f0-4711-95cc-874919666585): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.228253 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:d2eba62b82728578c57f60de5baa3562bc0a355f65123a9e5fedff385988eb64" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.228587 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:d2eba62b82728578c57f60de5baa3562bc0a355f65123a9e5fedff385988eb64,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hx7pf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-56cf9c6b99-2lgbz_openstack-operators(7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.500582 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" podUID="bfc73681-1e32-4e79-818f-944b609ef92b" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.747224 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" podUID="b76e3341-e4f0-4711-95cc-874919666585" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.827719 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" podUID="e2386607-ebad-4616-9e23-d81e2c64350c" Sep 29 13:58:14 crc kubenswrapper[4869]: E0929 13:58:14.946754 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" podUID="7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.378357 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" event={"ID":"6f94106a-7f34-4265-a988-c90ac7466919","Type":"ContainerStarted","Data":"97c3a37ed30d51c631d4e5a3261938bb58a6d6fce7377f3e5887025643793fa5"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.393439 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" event={"ID":"f719d8af-e87a-4f02-bd10-fe1a2899b71d","Type":"ContainerStarted","Data":"28716f814eda60fafab9d179021140b180face528d2aae3274a1be257adc2037"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.445664 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" 
event={"ID":"cc44f555-b327-4135-a59c-5c085be0ca2e","Type":"ContainerStarted","Data":"f83f05cff598f49153730afc8705ff86d3e97daa94147e71fea5e83c586615c2"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.454669 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" event={"ID":"e2386607-ebad-4616-9e23-d81e2c64350c","Type":"ContainerStarted","Data":"ea6d376ba7b0fe4a990f77f2947f5cc8dda426a12bd55df4b6baa0f8e6b619ef"} Sep 29 13:58:15 crc kubenswrapper[4869]: E0929 13:58:15.474566 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:f3d8f19fdacecd967319b843a048e3be334bc69f486bd64b56238e90e5ce461a\\\"\"" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" podUID="e2386607-ebad-4616-9e23-d81e2c64350c" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.505017 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" event={"ID":"43f73602-1bf6-4550-bad6-ce9cedaa6955","Type":"ContainerStarted","Data":"6450b639c984b7b44290b194143ee2e2b969a946879c95e26e4fc42e058383f6"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.523153 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" event={"ID":"bfc73681-1e32-4e79-818f-944b609ef92b","Type":"ContainerStarted","Data":"94a92c28f628b6ea4e63662a2348da3cdf98f81a5648b5c9d0fba220445acfeb"} Sep 29 13:58:15 crc kubenswrapper[4869]: E0929 13:58:15.536840 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:b98ec0b50404626e0440bcf2e22f8d7ff06d1b1bd99f01830bceb8a2b27aa094\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" podUID="bfc73681-1e32-4e79-818f-944b609ef92b" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.538022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" event={"ID":"b76e3341-e4f0-4711-95cc-874919666585","Type":"ContainerStarted","Data":"55e48f838d637b3cdc126094adbd77459072490b5773437d3006a49bbe239958"} Sep 29 13:58:15 crc kubenswrapper[4869]: E0929 13:58:15.543559 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:87a522d480797f54499bcd1c4a48837e1b17c33d4cc43e99ed7a53b8cedb17c7\\\"\"" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" podUID="b76e3341-e4f0-4711-95cc-874919666585" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.554915 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" event={"ID":"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0","Type":"ContainerStarted","Data":"b1512fc4a002a2754561a1246ecd4b14aed522cd6d837c05eb1e10574805776c"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.584231 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" 
event={"ID":"6ed6de20-a7c3-40be-bc68-ff4f978dba14","Type":"ContainerStarted","Data":"3b2abe9ecfa5c2e701fae08b3870525ec5d5656e7d57ba36a34dbc93bad92382"} Sep 29 13:58:15 crc kubenswrapper[4869]: E0929 13:58:15.593280 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:d2eba62b82728578c57f60de5baa3562bc0a355f65123a9e5fedff385988eb64\\\"\"" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" podUID="7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.605888 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" event={"ID":"436fe0ad-2010-46f2-ad86-ee243370d675","Type":"ContainerStarted","Data":"8799762865a08033a6114c352a1ef53551ecbdb1269fe23e0730333ec14a77b4"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.621904 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" event={"ID":"8667518f-048c-48b3-b838-3ff38cbc76b7","Type":"ContainerStarted","Data":"06a7e94a53cde96c2cc61bcea6718349386b1877f2346e7393227b4e163f0159"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.658229 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" event={"ID":"c13c38cf-c074-4e90-a79f-58ad4a24db6e","Type":"ContainerStarted","Data":"c86ee42bcbb29728ba2c6dc3a3def772cf6749bb86050426671a862f08a20ee6"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.680745 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" event={"ID":"9eb7b21e-54fc-4b89-977a-6ad6481e7237","Type":"ContainerStarted","Data":"97fa52ce899c31a131195ca584e0260e6b68bc6ecf3856b7d0d7b604085f5dcc"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.722099 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" event={"ID":"aa4f1345-cc28-491e-876c-a125e73b4a8b","Type":"ContainerStarted","Data":"40f11efba2c8e129e0cf51b13c558381c5d11003368e88d815df70f5f035f93f"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.746330 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" event={"ID":"17b6d42c-924b-48c9-9a78-13cb3a8d7776","Type":"ContainerStarted","Data":"161bdf7589e54c29249ffb76fd9774f2b850ffc8bbf3bba8411b36eb4973f5f2"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.789914 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" event={"ID":"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd","Type":"ContainerStarted","Data":"8c43276eb4d62cdd47e0c8b302960c5ad9999692075f51ac344c89a4277f1e1a"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.789987 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" event={"ID":"d7f3dd6f-b5bd-482e-a41a-e426459a8bfd","Type":"ContainerStarted","Data":"3450b2a3f8a1971b4858e10c9dbdbd3734b5e54797013049708a98927dbd08f4"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.791148 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.805924 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" event={"ID":"2716f7dc-4fa9-46a8-abcb-71016098e732","Type":"ContainerStarted","Data":"6295ba7e39a85e622d558c2778685c8406091af54c73c72da3fff09da72c7c8f"} Sep 29 13:58:15 crc kubenswrapper[4869]: I0929 13:58:15.829018 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g" podStartSLOduration=4.261895296 podStartE2EDuration="24.828990953s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.753662645 +0000 UTC m=+1000.194306965" lastFinishedPulling="2025-09-29 13:58:14.320758302 +0000 UTC m=+1020.761402622" observedRunningTime="2025-09-29 13:58:15.827229738 +0000 UTC m=+1022.267874058" watchObservedRunningTime="2025-09-29 13:58:15.828990953 +0000 UTC m=+1022.269635273" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.842923 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" event={"ID":"436fe0ad-2010-46f2-ad86-ee243370d675","Type":"ContainerStarted","Data":"fbd6f3bce7e3fc10e01b5ab4a8fe409580e076514b8c6c09a4c578256268f055"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.843635 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.854268 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" event={"ID":"cc44f555-b327-4135-a59c-5c085be0ca2e","Type":"ContainerStarted","Data":"d0ec880648c24ca627fe9d74cd573b6bacfac9aa12d3b89f93a36fb61097d2a0"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.858145 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" event={"ID":"f719d8af-e87a-4f02-bd10-fe1a2899b71d","Type":"ContainerStarted","Data":"ae278fcebf2df1c6f8c3344311e2ab32c0b80099296bec4c8c653dcb09eab3f2"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.858255 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.861575 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" event={"ID":"43f73602-1bf6-4550-bad6-ce9cedaa6955","Type":"ContainerStarted","Data":"2b4d0fff23131a940d31ddbcf355f3d386815aaeb7fc854cab37e1c7c037e588"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.862523 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.864413 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" event={"ID":"17b6d42c-924b-48c9-9a78-13cb3a8d7776","Type":"ContainerStarted","Data":"c9bf10962dd2edb06feb116dc894966bd07e6a2bf1d554e460e355ea36d6e139"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.864557 4869 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.875070 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" event={"ID":"6ed6de20-a7c3-40be-bc68-ff4f978dba14","Type":"ContainerStarted","Data":"ec61a7635f8172eeada85bb49d6e7b127fbfaf77073f7a8037d422f3e8eb9fa3"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.875633 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.879542 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" event={"ID":"6f94106a-7f34-4265-a988-c90ac7466919","Type":"ContainerStarted","Data":"f03381bf50f78307995db69ab48679a41b5f05b117fba9704b3053c97d9db952"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.879698 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.881185 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" event={"ID":"aa4f1345-cc28-491e-876c-a125e73b4a8b","Type":"ContainerStarted","Data":"99b3a1e30d4e998e038be0854ba01bfa3d41c358d2d33f4f2be0ad2f119f33cc"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.881410 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.895275 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x" podStartSLOduration=4.792934634 podStartE2EDuration="24.895242724s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.227841114 +0000 UTC m=+1000.668485434" lastFinishedPulling="2025-09-29 13:58:14.330149204 +0000 UTC m=+1020.770793524" observedRunningTime="2025-09-29 13:58:16.868438873 +0000 UTC m=+1023.309083203" watchObservedRunningTime="2025-09-29 13:58:16.895242724 +0000 UTC m=+1023.335887044" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.896804 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" event={"ID":"c13c38cf-c074-4e90-a79f-58ad4a24db6e","Type":"ContainerStarted","Data":"2a77576be9768b9eee1afba16499b3fb917ccc3849606a8144b15a0287ea831b"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.897744 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.901407 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r" podStartSLOduration=5.174796788 podStartE2EDuration="25.901378863s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.583737888 +0000 UTC m=+1000.024382208" lastFinishedPulling="2025-09-29 13:58:14.310319973 +0000 UTC m=+1020.750964283" observedRunningTime="2025-09-29 
13:58:16.887931406 +0000 UTC m=+1023.328575736" watchObservedRunningTime="2025-09-29 13:58:16.901378863 +0000 UTC m=+1023.342023183" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.904078 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" event={"ID":"9eb7b21e-54fc-4b89-977a-6ad6481e7237","Type":"ContainerStarted","Data":"4e8c7be84326fbadf86d1ca1c904b1fac9955863c129dcf70a2ea5dc153f040b"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.904642 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.911294 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" event={"ID":"2716f7dc-4fa9-46a8-abcb-71016098e732","Type":"ContainerStarted","Data":"904c8e4d99042a628ef3a8d3331d8a55c3bcd6f5c281c16fc4a32b74d9b1524d"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.911429 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.923724 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" event={"ID":"8667518f-048c-48b3-b838-3ff38cbc76b7","Type":"ContainerStarted","Data":"def5e98c858afefcedcbcc1c912bd8364bef95864790e61c8b83174c895d4c16"} Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.923774 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" Sep 29 13:58:16 crc kubenswrapper[4869]: E0929 13:58:16.927763 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:f3d8f19fdacecd967319b843a048e3be334bc69f486bd64b56238e90e5ce461a\\\"\"" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" podUID="e2386607-ebad-4616-9e23-d81e2c64350c" Sep 29 13:58:16 crc kubenswrapper[4869]: E0929 13:58:16.927896 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:b98ec0b50404626e0440bcf2e22f8d7ff06d1b1bd99f01830bceb8a2b27aa094\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" podUID="bfc73681-1e32-4e79-818f-944b609ef92b" Sep 29 13:58:16 crc kubenswrapper[4869]: E0929 13:58:16.927986 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:d2eba62b82728578c57f60de5baa3562bc0a355f65123a9e5fedff385988eb64\\\"\"" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" podUID="7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0" Sep 29 13:58:16 crc kubenswrapper[4869]: E0929 13:58:16.928036 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/infra-operator@sha256:87a522d480797f54499bcd1c4a48837e1b17c33d4cc43e99ed7a53b8cedb17c7\\\"\"" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" podUID="b76e3341-e4f0-4711-95cc-874919666585" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.958232 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx" podStartSLOduration=5.185447304 podStartE2EDuration="25.95820695s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.547815781 +0000 UTC m=+999.988460101" lastFinishedPulling="2025-09-29 13:58:14.320575427 +0000 UTC m=+1020.761219747" observedRunningTime="2025-09-29 13:58:16.946851277 +0000 UTC m=+1023.387495617" watchObservedRunningTime="2025-09-29 13:58:16.95820695 +0000 UTC m=+1023.398851270" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.959869 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw" podStartSLOduration=6.931889942 podStartE2EDuration="25.959862732s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:55.289732733 +0000 UTC m=+1001.730377053" lastFinishedPulling="2025-09-29 13:58:14.317705523 +0000 UTC m=+1020.758349843" observedRunningTime="2025-09-29 13:58:16.925762802 +0000 UTC m=+1023.366407122" watchObservedRunningTime="2025-09-29 13:58:16.959862732 +0000 UTC m=+1023.400507052" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.973140 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq" podStartSLOduration=5.226552444 podStartE2EDuration="25.973115194s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.569434699 +0000 UTC m=+1000.010079019" lastFinishedPulling="2025-09-29 13:58:14.315997449 +0000 UTC m=+1020.756641769" observedRunningTime="2025-09-29 13:58:16.962462739 +0000 UTC m=+1023.403107049" watchObservedRunningTime="2025-09-29 13:58:16.973115194 +0000 UTC m=+1023.413759514" Sep 29 13:58:16 crc kubenswrapper[4869]: I0929 13:58:16.989245 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7" podStartSLOduration=5.43637567 podStartE2EDuration="25.98922374s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.766427384 +0000 UTC m=+1000.207071704" lastFinishedPulling="2025-09-29 13:58:14.319275454 +0000 UTC m=+1020.759919774" observedRunningTime="2025-09-29 13:58:16.982105177 +0000 UTC m=+1023.422749507" watchObservedRunningTime="2025-09-29 13:58:16.98922374 +0000 UTC m=+1023.429868060" Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.004432 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g" podStartSLOduration=4.890864602 podStartE2EDuration="25.004411892s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.216353288 +0000 UTC m=+1000.656997608" lastFinishedPulling="2025-09-29 13:58:14.329900588 +0000 UTC m=+1020.770544898" observedRunningTime="2025-09-29 13:58:17.002274237 +0000 UTC m=+1023.442918557" watchObservedRunningTime="2025-09-29 13:58:17.004411892 +0000 UTC 
m=+1023.445056212"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.028961 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w" podStartSLOduration=5.481973928 podStartE2EDuration="26.028858323s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.782025727 +0000 UTC m=+1000.222670047" lastFinishedPulling="2025-09-29 13:58:14.328910132 +0000 UTC m=+1020.769554442" observedRunningTime="2025-09-29 13:58:17.025996359 +0000 UTC m=+1023.466640679" watchObservedRunningTime="2025-09-29 13:58:17.028858323 +0000 UTC m=+1023.469502643"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.067095 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx" podStartSLOduration=5.017124101 podStartE2EDuration="25.06706926s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.280459322 +0000 UTC m=+1000.721103642" lastFinishedPulling="2025-09-29 13:58:14.330404481 +0000 UTC m=+1020.771048801" observedRunningTime="2025-09-29 13:58:17.062470031 +0000 UTC m=+1023.503114351" watchObservedRunningTime="2025-09-29 13:58:17.06706926 +0000 UTC m=+1023.507713580"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.080552 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr" podStartSLOduration=4.720489381 podStartE2EDuration="26.080533427s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:52.943071181 +0000 UTC m=+999.383715501" lastFinishedPulling="2025-09-29 13:58:14.303115237 +0000 UTC m=+1020.743759547" observedRunningTime="2025-09-29 13:58:17.078126415 +0000 UTC m=+1023.518770725" watchObservedRunningTime="2025-09-29 13:58:17.080533427 +0000 UTC m=+1023.521177747"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.100821 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g" podStartSLOduration=4.9720468570000005 podStartE2EDuration="25.10079992s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.200397456 +0000 UTC m=+1000.641041776" lastFinishedPulling="2025-09-29 13:58:14.329150519 +0000 UTC m=+1020.769794839" observedRunningTime="2025-09-29 13:58:17.094133748 +0000 UTC m=+1023.534778068" watchObservedRunningTime="2025-09-29 13:58:17.10079992 +0000 UTC m=+1023.541444240"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.112830 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg" podStartSLOduration=5.206891669 podStartE2EDuration="25.11280893s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.413149487 +0000 UTC m=+1000.853793807" lastFinishedPulling="2025-09-29 13:58:14.319066748 +0000 UTC m=+1020.759711068" observedRunningTime="2025-09-29 13:58:17.112072781 +0000 UTC m=+1023.552717101" watchObservedRunningTime="2025-09-29 13:58:17.11280893 +0000 UTC m=+1023.553453250"
Sep 29 13:58:17 crc kubenswrapper[4869]: I0929 13:58:17.935150 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw"
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.959241 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" event={"ID":"6f08d81a-137d-43b2-8c78-8227d4cd848c","Type":"ContainerStarted","Data":"328c5fdf0da1c9aeee98c21437f6cf062eefbbd39cde8712818143cf2c7511bb"}
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.959813 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf"
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.961897 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" event={"ID":"2a0fb7a7-5469-4513-ac6f-3ce8f28b9310","Type":"ContainerStarted","Data":"5e414cf771fcb67248d235d5e9f899c7dcc8c579009f63caad4fff7d01667321"}
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.962058 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw"
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.964109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" event={"ID":"ad5406a4-1938-49ee-87ef-cae347abba83","Type":"ContainerStarted","Data":"202eea0a5be7a6f695df77a05feac7c2ae7b1d50dcf8b2a92d1bbc2fad29ea30"}
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.964350 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw"
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.966134 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" event={"ID":"e73fced4-cf05-4c8d-b1af-39c07ef69514","Type":"ContainerStarted","Data":"c24b5f45fe900b2bf4bc0f0569c206ec931b39e5cd593341709fca9249bdfeac"}
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.969206 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" event={"ID":"bc76592b-e1dc-41f2-8696-8edd7e3d8315","Type":"ContainerStarted","Data":"1ea0e3edde35c6c24c93b230f091c749be573ad0b58130be0174e1cb6b95c287"}
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.969534 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm"
Sep 29 13:58:20 crc kubenswrapper[4869]: I0929 13:58:20.986060 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf" podStartSLOduration=4.451866219 podStartE2EDuration="29.986019066s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.293434817 +0000 UTC m=+1000.734079137" lastFinishedPulling="2025-09-29 13:58:19.827587654 +0000 UTC m=+1026.268231984" observedRunningTime="2025-09-29 13:58:20.98387861 +0000 UTC m=+1027.424522930" watchObservedRunningTime="2025-09-29 13:58:20.986019066 +0000 UTC m=+1027.426663376"
Sep 29 13:58:21 crc kubenswrapper[4869]: I0929 13:58:21.009892 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" podStartSLOduration=3.414402971 podStartE2EDuration="29.009871351s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.307535951 +0000 UTC m=+1000.748180271" lastFinishedPulling="2025-09-29 13:58:19.903004331 +0000 UTC m=+1026.343648651" observedRunningTime="2025-09-29 13:58:21.00905925 +0000 UTC m=+1027.449703570" watchObservedRunningTime="2025-09-29 13:58:21.009871351 +0000 UTC m=+1027.450515671"
Sep 29 13:58:21 crc kubenswrapper[4869]: I0929 13:58:21.026785 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" podStartSLOduration=4.49180132 podStartE2EDuration="30.026764017s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.293838558 +0000 UTC m=+1000.734482878" lastFinishedPulling="2025-09-29 13:58:19.828801255 +0000 UTC m=+1026.269445575" observedRunningTime="2025-09-29 13:58:21.025724081 +0000 UTC m=+1027.466368401" watchObservedRunningTime="2025-09-29 13:58:21.026764017 +0000 UTC m=+1027.467408337"
Sep 29 13:58:21 crc kubenswrapper[4869]: I0929 13:58:21.050714 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" podStartSLOduration=4.567118724 podStartE2EDuration="30.050692315s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.344077745 +0000 UTC m=+1000.784722065" lastFinishedPulling="2025-09-29 13:58:19.827651336 +0000 UTC m=+1026.268295656" observedRunningTime="2025-09-29 13:58:21.047636256 +0000 UTC m=+1027.488280566" watchObservedRunningTime="2025-09-29 13:58:21.050692315 +0000 UTC m=+1027.491336635"
Sep 29 13:58:21 crc kubenswrapper[4869]: I0929 13:58:21.066438 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-79d8469568-j4gqm" podStartSLOduration=3.56926141 podStartE2EDuration="29.066403601s" podCreationTimestamp="2025-09-29 13:57:52 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.41364239 +0000 UTC m=+1000.854286720" lastFinishedPulling="2025-09-29 13:58:19.910784591 +0000 UTC m=+1026.351428911" observedRunningTime="2025-09-29 13:58:21.065274231 +0000 UTC m=+1027.505918551" watchObservedRunningTime="2025-09-29 13:58:21.066403601 +0000 UTC m=+1027.507047941"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.047965 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6495d75b5-qq2wr"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.059341 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748c574d75-shlvx"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.147337 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-67b5d44b7f-57g9g"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.166462 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-8ff95898-h254r"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.209243 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-695847bc78-578dq"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.289017 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7bf498966c-z9j4w"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.557710 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-687b9cf756-l57c7"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.767388 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5f95c46c78-5wk7g"
Sep 29 13:58:22 crc kubenswrapper[4869]: I0929 13:58:22.880468 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-774b97b48-8bn6g"
Sep 29 13:58:23 crc kubenswrapper[4869]: I0929 13:58:23.017067 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5bf96cfbc4-rhc9x"
Sep 29 13:58:23 crc kubenswrapper[4869]: I0929 13:58:23.113969 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-f66b554c6-xmgtg"
Sep 29 13:58:23 crc kubenswrapper[4869]: I0929 13:58:23.281509 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5f5b8d96d6-ppscx"
Sep 29 13:58:24 crc kubenswrapper[4869]: I0929 13:58:24.314593 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6d776955-jq4lw"
Sep 29 13:58:28 crc kubenswrapper[4869]: I0929 13:58:28.047377 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" event={"ID":"e2386607-ebad-4616-9e23-d81e2c64350c","Type":"ContainerStarted","Data":"126c096fee9b6f959a4fd99859a765ecbf9c69048e47d5b32208d32a04ca8add"}
Sep 29 13:58:28 crc kubenswrapper[4869]: I0929 13:58:28.048567 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5"
Sep 29 13:58:28 crc kubenswrapper[4869]: I0929 13:58:28.076647 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5" podStartSLOduration=2.9626742 podStartE2EDuration="37.076602489s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.580583467 +0000 UTC m=+1000.021227787" lastFinishedPulling="2025-09-29 13:58:27.694511736 +0000 UTC m=+1034.135156076" observedRunningTime="2025-09-29 13:58:28.067730989 +0000 UTC m=+1034.508375329" watchObservedRunningTime="2025-09-29 13:58:28.076602489 +0000 UTC m=+1034.517246809"
Sep 29 13:58:30 crc kubenswrapper[4869]: I0929 13:58:30.065976 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" event={"ID":"b76e3341-e4f0-4711-95cc-874919666585","Type":"ContainerStarted","Data":"2f085d713ae90934dd27d1951e30e6e4fc63f17fa74f330b63062ec2da1b43e5"}
Sep 29 13:58:30 crc kubenswrapper[4869]: I0929 13:58:30.067191 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh"
Sep 29 13:58:30 crc kubenswrapper[4869]: I0929 13:58:30.087485 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" podStartSLOduration=4.025892606 podStartE2EDuration="39.08746281s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:54.641680225 +0000 UTC m=+1001.082324545" lastFinishedPulling="2025-09-29 13:58:29.703250409 +0000 UTC m=+1036.143894749" observedRunningTime="2025-09-29 13:58:30.085205502 +0000 UTC m=+1036.525849862" watchObservedRunningTime="2025-09-29 13:58:30.08746281 +0000 UTC m=+1036.528107130"
Sep 29 13:58:31 crc kubenswrapper[4869]: I0929 13:58:31.079468 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" event={"ID":"bfc73681-1e32-4e79-818f-944b609ef92b","Type":"ContainerStarted","Data":"3b547c46b8bb062da48cecb49cf814d168b9300f153abe93792e0c2a9e5f366a"}
Sep 29 13:58:31 crc kubenswrapper[4869]: I0929 13:58:31.079840 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2"
Sep 29 13:58:31 crc kubenswrapper[4869]: I0929 13:58:31.104744 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" podStartSLOduration=3.121282711 podStartE2EDuration="40.104708495s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.771496975 +0000 UTC m=+1000.212141295" lastFinishedPulling="2025-09-29 13:58:30.754922759 +0000 UTC m=+1037.195567079" observedRunningTime="2025-09-29 13:58:31.100169417 +0000 UTC m=+1037.540813747" watchObservedRunningTime="2025-09-29 13:58:31.104708495 +0000 UTC m=+1037.545352825"
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.084194 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d74f4d695-xxmd5"
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.090219 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" event={"ID":"7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0","Type":"ContainerStarted","Data":"cf42693e085a005ac1da7cd72c79d0a52d2cdbe44e97e218588b6ea515adb323"}
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.090566 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz"
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.138583 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" podStartSLOduration=3.062716031 podStartE2EDuration="41.138556313s" podCreationTimestamp="2025-09-29 13:57:51 +0000 UTC" firstStartedPulling="2025-09-29 13:57:53.590460872 +0000 UTC m=+1000.031105192" lastFinishedPulling="2025-09-29 13:58:31.666301134 +0000 UTC m=+1038.106945474" observedRunningTime="2025-09-29 13:58:32.137127116 +0000 UTC m=+1038.577771436" watchObservedRunningTime="2025-09-29 13:58:32.138556313 +0000 UTC m=+1038.579200633"
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.560032 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-54d766c9f9-ggvvf"
Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.663945 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready"
pod="openstack-operators/nova-operator-controller-manager-c7c776c96-v9wbw" Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.708077 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-76fcc6dc7c-dlvlw" Sep 29 13:58:32 crc kubenswrapper[4869]: I0929 13:58:32.858969 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-bc7dc7bd9-59sjm" Sep 29 13:58:34 crc kubenswrapper[4869]: I0929 13:58:34.048176 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-858cd69f49-lxvwh" Sep 29 13:58:42 crc kubenswrapper[4869]: I0929 13:58:42.279583 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-9fc8d5567-lsch2" Sep 29 13:58:42 crc kubenswrapper[4869]: I0929 13:58:42.315645 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-56cf9c6b99-2lgbz" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.339800 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"] Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.343673 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.351407 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"] Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.352162 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.352504 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.352769 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.353089 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-z799b" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.424456 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"] Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.427872 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.431721 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.433096 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"] Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.476968 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmds4\" (UniqueName: \"kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.477133 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.578463 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.578538 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7qq5\" (UniqueName: \"kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.578587 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.578669 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmds4\" (UniqueName: \"kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.578700 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.580273 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" 
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.604306 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmds4\" (UniqueName: \"kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4\") pod \"dnsmasq-dns-688fdbb689-m2vmp\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " pod="openstack/dnsmasq-dns-688fdbb689-m2vmp"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.679627 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7qq5\" (UniqueName: \"kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.679704 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.679787 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.680810 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.681156 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.683214 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.706376 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7qq5\" (UniqueName: \"kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5\") pod \"dnsmasq-dns-75f8b56f9c-kkxw4\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:00 crc kubenswrapper[4869]: I0929 13:59:00.749139 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4"
Sep 29 13:59:01 crc kubenswrapper[4869]: I0929 13:59:01.215733 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"]
Sep 29 13:59:01 crc kubenswrapper[4869]: I0929 13:59:01.295744 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"]
Sep 29 13:59:01 crc kubenswrapper[4869]: W0929 13:59:01.303521 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc163c51_2805_4fe9_b547_35707a459808.slice/crio-5f9454af3a866dd35ce75b0b98090e2e2956e9c6953563e872b650e7ac81aca5 WatchSource:0}: Error finding container 5f9454af3a866dd35ce75b0b98090e2e2956e9c6953563e872b650e7ac81aca5: Status 404 returned error can't find the container with id 5f9454af3a866dd35ce75b0b98090e2e2956e9c6953563e872b650e7ac81aca5
Sep 29 13:59:01 crc kubenswrapper[4869]: I0929 13:59:01.351952 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" event={"ID":"dc163c51-2805-4fe9-b547-35707a459808","Type":"ContainerStarted","Data":"5f9454af3a866dd35ce75b0b98090e2e2956e9c6953563e872b650e7ac81aca5"}
Sep 29 13:59:01 crc kubenswrapper[4869]: I0929 13:59:01.354338 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" event={"ID":"73665388-f46f-4939-b2eb-29cd02e173b2","Type":"ContainerStarted","Data":"6e04ec12da91287da0093e04cf869c153d27e7c6fc193c99d5b8fb640e303d0a"}
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.363150 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.381511 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.407044 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.484454 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.549905 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.550240 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.550371 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbvnt\" (UniqueName: \"kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.646694 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.653007 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.653084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.653166 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbvnt\" (UniqueName: \"kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.654276 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.654305 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.677484 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbvnt\" (UniqueName: \"kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt\") pod \"dnsmasq-dns-65b98c69ff-2hdwm\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.707543 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.709770 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.716858 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"]
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.781307 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.858206 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.858269 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.860541 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqm74\" (UniqueName: \"kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.962845 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqm74\" (UniqueName: \"kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.962903 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.962921 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.964848 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.964887 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:04 crc kubenswrapper[4869]: I0929 13:59:04.986818 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqm74\" (UniqueName: \"kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74\") pod \"dnsmasq-dns-76597445-j4kl2\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.021600 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.043459 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76597445-j4kl2"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.082904 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.084646 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.094006 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.166639 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8wtg\" (UniqueName: \"kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.166733 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.166931 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.270951 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.271047 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.271111 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8wtg\" (UniqueName: \"kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.272200 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.272355 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.296708 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8wtg\" (UniqueName: \"kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg\") pod \"dnsmasq-dns-bf4c785d5-6ck9l\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.355796 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.396950 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.433201 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.453355 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm" event={"ID":"a6836230-3552-4f28-bda9-14e88ed79b41","Type":"ContainerStarted","Data":"3b251c35444b5f34656d11efeb4b36b5856b50980fbb8da883fb23d2e994d704"}
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.457841 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76597445-j4kl2" event={"ID":"47a3cbc5-94fc-419d-8a41-03d0273b8e61","Type":"ContainerStarted","Data":"108d1b9047d82e0e1d3d6d0d4541b42ab7210c862d95c1f53de2a59017caf56d"}
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.531008 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.532586 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.535080 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.535309 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.536624 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.539460 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-n952x"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.539467 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.539728 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.539891 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.549271 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680183 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680256 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680288 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680316 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680390 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680410 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680431 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680458 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680479 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.680502 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpmrz\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.782397 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.782854 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.782897 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.782949 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.782975 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783001 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpmrz\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783040 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783094 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783132 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783172 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.783240 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.786147 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.788205 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.788351 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.793306 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.800739 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.803788 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.803818 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.804390 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.805022 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.805590 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.823333 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpmrz\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.833400 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.852750 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.854537 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.855255 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.862421 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.862854 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xwt5x"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.862876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.863087 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.863357 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.863506 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.863589 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.866742 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989215 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989295 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989338 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989364 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989409 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989433 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72xx4\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989458 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989480 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989499 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989520 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:05 crc kubenswrapper[4869]: I0929 13:59:05.989538 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091167 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091260 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091302 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091366 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091392 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72xx4\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091437 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091463 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091503 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091523 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091542 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.091639 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.093545 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.094566 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.095317 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.095776 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.097488 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.098788 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.098918 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.103246 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.115557 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.122811 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"]
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.125329 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.126103 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72xx4\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.139501 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.178649 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"]
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.187666 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.194248 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.194532 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.195017 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.195024 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.195255 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.195322 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.196288 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-66829"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.196336 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.216666 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"]
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296409 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296478 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296513 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0b77ed89-e796-4138-ae2c-fcd5f2125233-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296539 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0"
Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296579 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") "
pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296632 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296653 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vxfr\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-kube-api-access-2vxfr\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296670 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296698 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296721 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.296750 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0b77ed89-e796-4138-ae2c-fcd5f2125233-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398082 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398156 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vxfr\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-kube-api-access-2vxfr\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398189 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398261 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398289 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398319 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0b77ed89-e796-4138-ae2c-fcd5f2125233-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398393 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.398826 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.399129 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.401042 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.403549 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0b77ed89-e796-4138-ae2c-fcd5f2125233-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.403814 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.409875 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.410697 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.413324 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.413479 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0b77ed89-e796-4138-ae2c-fcd5f2125233-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.413544 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.413782 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.414497 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0b77ed89-e796-4138-ae2c-fcd5f2125233-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.415355 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0b77ed89-e796-4138-ae2c-fcd5f2125233-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.418209 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vxfr\" (UniqueName: 
\"kubernetes.io/projected/0b77ed89-e796-4138-ae2c-fcd5f2125233-kube-api-access-2vxfr\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.418974 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0b77ed89-e796-4138-ae2c-fcd5f2125233-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.440126 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"0b77ed89-e796-4138-ae2c-fcd5f2125233\") " pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.496544 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 13:59:06 crc kubenswrapper[4869]: I0929 13:59:06.524627 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.196933 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.201591 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.211674 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.213969 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.213973 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.214140 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-k4jtj" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.214536 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.216875 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.226271 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371189 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-generated\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371280 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-default\") pod \"openstack-galera-0\" (UID: 
\"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371366 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371383 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371404 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8knss\" (UniqueName: \"kubernetes.io/projected/12b51255-a7f5-4295-9367-e8198b8a5c55-kube-api-access-8knss\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371445 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371491 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-secrets\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371691 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-operator-scripts\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.371856 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-kolla-config\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.474858 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-generated\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.474277 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-generated\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc 
kubenswrapper[4869]: I0929 13:59:08.474968 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-default\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.475317 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.475741 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-config-data-default\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.475780 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.475818 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.475837 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8knss\" (UniqueName: \"kubernetes.io/projected/12b51255-a7f5-4295-9367-e8198b8a5c55-kube-api-access-8knss\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.476339 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.476377 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-secrets\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.476418 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-operator-scripts\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.476537 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-kolla-config\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.477050 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-kolla-config\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.478519 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b51255-a7f5-4295-9367-e8198b8a5c55-operator-scripts\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.481007 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.489403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.491163 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/12b51255-a7f5-4295-9367-e8198b8a5c55-secrets\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.499283 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8knss\" (UniqueName: \"kubernetes.io/projected/12b51255-a7f5-4295-9367-e8198b8a5c55-kube-api-access-8knss\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.509653 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"12b51255-a7f5-4295-9367-e8198b8a5c55\") " pod="openstack/openstack-galera-0" Sep 29 13:59:08 crc kubenswrapper[4869]: I0929 13:59:08.531445 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.146981 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.151854 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.154351 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.155430 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.155732 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-fbswx" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.155876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.156026 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.292875 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.292940 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293087 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw8fc\" (UniqueName: \"kubernetes.io/projected/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kube-api-access-fw8fc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293157 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293212 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293254 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293306 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293355 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.293396 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395227 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395319 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395350 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395375 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395468 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395500 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395532 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw8fc\" (UniqueName: 
\"kubernetes.io/projected/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kube-api-access-fw8fc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395558 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.395591 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.396322 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.396445 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.396779 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.397150 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.397239 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.400746 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.400877 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " 
pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.411388 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.413763 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw8fc\" (UniqueName: \"kubernetes.io/projected/d3b25f43-ab33-4712-ac4c-70cf4bba6ce2-kube-api-access-fw8fc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.427461 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2\") " pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.483755 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.797629 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.798811 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.804418 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.804738 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.805875 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-967k6" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.814013 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.909141 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwnxj\" (UniqueName: \"kubernetes.io/projected/6168192e-6336-436d-9883-f9608ade43dc-kube-api-access-nwnxj\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.909219 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.909249 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-kolla-config\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.909279 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-config-data\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:09 crc kubenswrapper[4869]: I0929 13:59:09.909343 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.010672 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-config-data\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.010786 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.010869 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwnxj\" (UniqueName: \"kubernetes.io/projected/6168192e-6336-436d-9883-f9608ade43dc-kube-api-access-nwnxj\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.011673 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.011706 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-kolla-config\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.011969 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-config-data\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.012392 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6168192e-6336-436d-9883-f9608ade43dc-kolla-config\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.015558 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.020104 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6168192e-6336-436d-9883-f9608ade43dc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.036321 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwnxj\" (UniqueName: \"kubernetes.io/projected/6168192e-6336-436d-9883-f9608ade43dc-kube-api-access-nwnxj\") pod \"memcached-0\" (UID: \"6168192e-6336-436d-9883-f9608ade43dc\") " pod="openstack/memcached-0" Sep 29 13:59:10 crc kubenswrapper[4869]: I0929 13:59:10.138911 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.416769 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.418263 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.420391 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-sjthx" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.427236 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.542523 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn9jk\" (UniqueName: \"kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk\") pod \"kube-state-metrics-0\" (UID: \"39fc25b3-44e9-413a-a50b-71655cb60e49\") " pod="openstack/kube-state-metrics-0" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.644352 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn9jk\" (UniqueName: \"kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk\") pod \"kube-state-metrics-0\" (UID: \"39fc25b3-44e9-413a-a50b-71655cb60e49\") " pod="openstack/kube-state-metrics-0" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.665950 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn9jk\" (UniqueName: \"kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk\") pod \"kube-state-metrics-0\" (UID: \"39fc25b3-44e9-413a-a50b-71655cb60e49\") " pod="openstack/kube-state-metrics-0" Sep 29 13:59:11 crc kubenswrapper[4869]: I0929 13:59:11.738241 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.711895 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.715047 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.718255 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.718277 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.718291 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.718672 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.723122 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-wtzth" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.727639 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.737388 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.769882 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.769917 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.769962 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.769988 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.770010 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.770027 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktbr7\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.770055 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.770453 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873022 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873077 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktbr7\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873128 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873179 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873297 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873323 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873377 
4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.873408 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.874377 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.878584 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.878944 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.879059 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.879101 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.880322 4869 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
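
The csi_attacher.go:380 message above is kubelet deciding to skip the staging step: NodeStageVolume (what kubelet calls MountDevice) is only invoked when the CSI node plugin advertises the STAGE_UNSTAGE_VOLUME capability, and kubevirt.io.hostpath-provisioner evidently does not. A minimal sketch of how a node plugin would advertise it, written against the container-storage-interface spec Go package (illustrative only, not the provisioner's actual source):

// Illustrative sketch only, not the kubevirt.io.hostpath-provisioner source.
// A CSI node plugin opts in to staging by advertising STAGE_UNSTAGE_VOLUME in
// NodeGetCapabilities; when the capability is absent, kubelet logs the
// "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..." line
// above and proceeds directly to NodePublishVolume (MountVolume.SetUp).
package sketch

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// nodeServer is a fragment of a csi.NodeServer implementation; a real plugin
// implements the remaining node RPCs as well.
type nodeServer struct{}

func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{{
			Type: &csi.NodeServiceCapability_Rpc{
				Rpc: &csi.NodeServiceCapability_RPC{
					// Advertising this makes kubelet call NodeStageVolume
					// (MountDevice) once per node before the per-pod
					// publishes; omitting it yields the "Skipping
					// MountDevice" path seen in this log.
					Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
				},
			},
		}},
	}, nil
}

Without that capability entry, MountDevice is logged as a no-op "success" (the very next line below), and the real work happens in the later MountVolume.SetUp call for the same PVC.
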
Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.880369 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/279150bdb70b2f663e8288477cfabe3c1abac14428cf8cff87f747a1669c049c/globalmount\"" pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.881119 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.896210 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktbr7\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:12.917554 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:13 crc kubenswrapper[4869]: I0929 13:59:13.048139 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.715865 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jhwdk"] Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.718314 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.734689 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-xv89b" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.735404 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.735593 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.750411 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk"] Sep 29 13:59:14 crc kubenswrapper[4869]: W0929 13:59:14.753419 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d97e3c5_9850_428b_9d88_89307901912d.slice/crio-688b90684503e7dbee7fcb682ee322e2c7b7f809b367ea35f7eeedfcfffb0b7f WatchSource:0}: Error finding container 688b90684503e7dbee7fcb682ee322e2c7b7f809b367ea35f7eeedfcfffb0b7f: Status 404 returned error can't find the container with id 688b90684503e7dbee7fcb682ee322e2c7b7f809b367ea35f7eeedfcfffb0b7f Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816052 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-log-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816103 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816136 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816187 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-252w9\" (UniqueName: \"kubernetes.io/projected/6dbad8f0-0816-40ae-b0b4-d6602f352641-kube-api-access-252w9\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816214 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dbad8f0-0816-40ae-b0b4-d6602f352641-scripts\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816251 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-ovn-controller-tls-certs\") pod \"ovn-controller-jhwdk\" (UID: 
\"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.816276 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-combined-ca-bundle\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.854413 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-4w2zq"] Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.856699 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.873168 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4w2zq"] Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.922595 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f6m6\" (UniqueName: \"kubernetes.io/projected/c1f79c03-e945-4593-843b-9d9c5f893970-kube-api-access-8f6m6\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939207 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-252w9\" (UniqueName: \"kubernetes.io/projected/6dbad8f0-0816-40ae-b0b4-d6602f352641-kube-api-access-252w9\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939274 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dbad8f0-0816-40ae-b0b4-d6602f352641-scripts\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939337 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-lib\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939441 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-ovn-controller-tls-certs\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939465 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-run\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939510 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-combined-ca-bundle\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939552 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-log\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939667 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-log-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939686 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939722 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1f79c03-e945-4593-843b-9d9c5f893970-scripts\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939763 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.939816 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-etc-ovs\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.941423 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-log-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.941595 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.941689 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dbad8f0-0816-40ae-b0b4-d6602f352641-var-run-ovn\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc 
kubenswrapper[4869]: I0929 13:59:14.942590 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dbad8f0-0816-40ae-b0b4-d6602f352641-scripts\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.955325 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-combined-ca-bundle\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.964141 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-252w9\" (UniqueName: \"kubernetes.io/projected/6dbad8f0-0816-40ae-b0b4-d6602f352641-kube-api-access-252w9\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:14 crc kubenswrapper[4869]: I0929 13:59:14.965140 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dbad8f0-0816-40ae-b0b4-d6602f352641-ovn-controller-tls-certs\") pod \"ovn-controller-jhwdk\" (UID: \"6dbad8f0-0816-40ae-b0b4-d6602f352641\") " pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047060 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-lib\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047129 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-run\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047162 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-log\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047205 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1f79c03-e945-4593-843b-9d9c5f893970-scripts\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047236 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-etc-ovs\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047266 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f6m6\" (UniqueName: 
\"kubernetes.io/projected/c1f79c03-e945-4593-843b-9d9c5f893970-kube-api-access-8f6m6\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.047883 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-lib\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.048007 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-log\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.048183 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-var-run\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.048431 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1f79c03-e945-4593-843b-9d9c5f893970-etc-ovs\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.048923 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.049826 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1f79c03-e945-4593-843b-9d9c5f893970-scripts\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.076677 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f6m6\" (UniqueName: \"kubernetes.io/projected/c1f79c03-e945-4593-843b-9d9c5f893970-kube-api-access-8f6m6\") pod \"ovn-controller-ovs-4w2zq\" (UID: \"c1f79c03-e945-4593-843b-9d9c5f893970\") " pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.184246 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.317745 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.320460 4869 util.go:30] "No sandbox for pod can be found. 
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.325338 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.325572 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.326100 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.326296 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-4pk9b"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.326531 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.343419 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455299 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455430 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455462 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455576 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455762 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455950 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.455981 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhxtg\" (UniqueName: 
\"kubernetes.io/projected/9a91d438-85a7-4b76-ac91-509403d09f26-kube-api-access-hhxtg\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.456052 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557309 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557351 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhxtg\" (UniqueName: \"kubernetes.io/projected/9a91d438-85a7-4b76-ac91-509403d09f26-kube-api-access-hhxtg\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557394 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557418 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557440 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557457 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557498 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.557530 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 
13:59:15.557869 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.558107 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.558814 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.558958 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a91d438-85a7-4b76-ac91-509403d09f26-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.561341 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.563647 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.569880 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a91d438-85a7-4b76-ac91-509403d09f26-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.577149 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhxtg\" (UniqueName: \"kubernetes.io/projected/9a91d438-85a7-4b76-ac91-509403d09f26-kube-api-access-hhxtg\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.586547 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a91d438-85a7-4b76-ac91-509403d09f26\") " pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.619965 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" event={"ID":"087de56b-f2f8-425d-9760-df2ec10ecd9c","Type":"ContainerStarted","Data":"394cd4712c9cbc252a077db0f9849ec9e9e0906be8413593ac489dd479064a08"}
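
The "MountVolume.MountDevice succeeded ... device mount path \"/mnt/openstack/pv05\"" entry above is the local-volume plugin at work: the PV simply points at a directory that already exists on the node, so the "device mount" resolves to that path with no attach step. A plausible shape for such a PersistentVolume, sketched in Go with the core/v1 types; the PV name, path, and node name come from the log, while capacity, access mode, and the rest are assumptions:

// A plausible shape for the "local-storage05-crc" PersistentVolume, sketched
// with core/v1 types. Name, path, and node name are taken from the log; the
// capacity and access mode are assumptions for illustration.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var localPV = corev1.PersistentVolume{
	ObjectMeta: metav1.ObjectMeta{Name: "local-storage05-crc"},
	Spec: corev1.PersistentVolumeSpec{
		Capacity: corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("10Gi"), // assumed size
		},
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		PersistentVolumeSource: corev1.PersistentVolumeSource{
			// The "device mount path" in the log is exactly this directory.
			Local: &corev1.LocalVolumeSource{Path: "/mnt/openstack/pv05"},
		},
		// Local PVs must pin themselves to the node that owns the directory.
		NodeAffinity: &corev1.VolumeNodeAffinity{
			Required: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"crc"}, // node name from the log prefix
					}},
				}},
			},
		},
	},
}
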
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.621311 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerStarted","Data":"688b90684503e7dbee7fcb682ee322e2c7b7f809b367ea35f7eeedfcfffb0b7f"}
Sep 29 13:59:15 crc kubenswrapper[4869]: I0929 13:59:15.642088 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.840833 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.843879 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.847386 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.847406 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.847430 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.852732 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-dm99p"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.859275 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.920233 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.920553 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.920716 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.920857 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.921021 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0"
Sep 29 13:59:18 crc 
kubenswrapper[4869]: I0929 13:59:18.921150 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.921278 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-config\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:18 crc kubenswrapper[4869]: I0929 13:59:18.921322 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8dnh\" (UniqueName: \"kubernetes.io/projected/39ea57f0-7600-4ca4-912d-c429465aca86-kube-api-access-v8dnh\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.022985 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023332 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023504 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-config\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023635 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8dnh\" (UniqueName: \"kubernetes.io/projected/39ea57f0-7600-4ca4-912d-c429465aca86-kube-api-access-v8dnh\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023775 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023873 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.023969 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.024065 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.024006 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.024259 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.024513 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-config\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.025251 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39ea57f0-7600-4ca4-912d-c429465aca86-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.030738 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.031242 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.031813 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39ea57f0-7600-4ca4-912d-c429465aca86-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.039632 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8dnh\" (UniqueName: \"kubernetes.io/projected/39ea57f0-7600-4ca4-912d-c429465aca86-kube-api-access-v8dnh\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.051054 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39ea57f0-7600-4ca4-912d-c429465aca86\") " pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:19 crc kubenswrapper[4869]: I0929 13:59:19.171512 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:20 crc kubenswrapper[4869]: I0929 13:59:20.656915 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:59:20 crc kubenswrapper[4869]: I0929 13:59:20.657287 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:59:21 crc kubenswrapper[4869]: I0929 13:59:21.882209 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 13:59:26 crc kubenswrapper[4869]: W0929 13:59:26.457944 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10a66559_191f_464a_8095_efb79cc5b29f.slice/crio-7db0897924248cffdd4753c851683177b635cf350eefe6cb717ae32ed859393a WatchSource:0}: Error finding container 7db0897924248cffdd4753c851683177b635cf350eefe6cb717ae32ed859393a: Status 404 returned error can't find the container with id 7db0897924248cffdd4753c851683177b635cf350eefe6cb717ae32ed859393a Sep 29 13:59:26 crc kubenswrapper[4869]: I0929 13:59:26.733070 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerStarted","Data":"7db0897924248cffdd4753c851683177b635cf350eefe6cb717ae32ed859393a"} Sep 29 13:59:26 crc kubenswrapper[4869]: I0929 13:59:26.832050 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 13:59:26 crc kubenswrapper[4869]: I0929 13:59:26.897521 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.636535 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.636649 4869 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.636851 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h7qq5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-75f8b56f9c-kkxw4_openstack(73665388-f46f-4939-b2eb-29cd02e173b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.638148 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" podUID="73665388-f46f-4939-b2eb-29cd02e173b2" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.851961 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.852036 4869 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.852187 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tmds4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-688fdbb689-m2vmp_openstack(dc163c51-2805-4fe9-b547-35707a459808): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.853412 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" podUID="dc163c51-2805-4fe9-b547-35707a459808" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.881821 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.882221 4869 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.882416 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbvnt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-65b98c69ff-2hdwm_openstack(a6836230-3552-4f28-bda9-14e88ed79b41): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.883863 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm" podUID="a6836230-3552-4f28-bda9-14e88ed79b41" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.925770 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.925838 4869 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.925973 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fqm74,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-76597445-j4kl2_openstack(47a3cbc5-94fc-419d-8a41-03d0273b8e61): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 13:59:27 crc kubenswrapper[4869]: E0929 13:59:27.927129 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-76597445-j4kl2" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61"
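
The three "Unhandled Error" dumps above are the kubelet printing the failing init container as a k8s.io/api/core/v1 Container struct literal. For orientation, here is the same spec rebuilt with the corev1 types, a minimal sketch with field values abridged from the dump above; the helper name is hypothetical and the CONFIG_HASH content hash is elided:

```go
// Sketch: the init container from the dump above, rebuilt as a corev1
// literal. Values come from the logged struct; nothing here is new config.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func dnsmasqInitContainer() corev1.Container {
	runAsUser := int64(1000650000)
	runAsNonRoot := true
	allowPrivilegeEscalation := false
	return corev1.Container{
		Name:    "init",
		Image:   "38.102.83.203:5001/podified-master-centos10/openstack-neutron-server:watcher_latest",
		Command: []string{"/bin/bash"},
		// The whole dnsmasq invocation is one bash -c string, ending in --test.
		Args: []string{"-c", "dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d" +
			" --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug" +
			" --bind-interfaces --listen-address=$(POD_IP) --port 5353" +
			" --log-facility=- --no-hosts --domain-needed --no-resolv" +
			" --bogus-priv --log-queries --test"},
		Env: []corev1.EnvVar{
			{Name: "CONFIG_HASH", Value: "…"}, // content hash from the dump, elided
			{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "status.podIP"},
			}},
		},
		VolumeMounts: []corev1.VolumeMount{
			{Name: "config", ReadOnly: true, MountPath: "/etc/dnsmasq.d/config.cfg", SubPath: "dns"},
			{Name: "dns-svc", ReadOnly: true, MountPath: "/etc/dnsmasq.d/hosts/dns-svc", SubPath: "dns-svc"},
		},
		ImagePullPolicy: corev1.PullIfNotPresent,
		SecurityContext: &corev1.SecurityContext{
			Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
			RunAsUser:                &runAsUser,
			RunAsNonRoot:             &runAsNonRoot,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
			SeccompProfile:           &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
		},
	}
}

func main() {
	fmt.Println(dnsmasqInitContainer().Image)
}
```

The trailing --test is the point of this init container: dnsmasq only parses the rendered config and exits, so a broken ConfigMap fails the pod before the long-running dnsmasq-dns container ever starts. The failures logged here are upstream of that check, though: the image pull itself was canceled, so the init container never ran.

Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.226549 4869 util.go:48] "No ready sandbox for pod can be found.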
Need to start a new one" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.316928 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7qq5\" (UniqueName: \"kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5\") pod \"73665388-f46f-4939-b2eb-29cd02e173b2\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.317007 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config\") pod \"73665388-f46f-4939-b2eb-29cd02e173b2\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.317075 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc\") pod \"73665388-f46f-4939-b2eb-29cd02e173b2\" (UID: \"73665388-f46f-4939-b2eb-29cd02e173b2\") " Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.317693 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config" (OuterVolumeSpecName: "config") pod "73665388-f46f-4939-b2eb-29cd02e173b2" (UID: "73665388-f46f-4939-b2eb-29cd02e173b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.317681 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "73665388-f46f-4939-b2eb-29cd02e173b2" (UID: "73665388-f46f-4939-b2eb-29cd02e173b2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.324819 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5" (OuterVolumeSpecName: "kube-api-access-h7qq5") pod "73665388-f46f-4939-b2eb-29cd02e173b2" (UID: "73665388-f46f-4939-b2eb-29cd02e173b2"). InnerVolumeSpecName "kube-api-access-h7qq5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.423008 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7qq5\" (UniqueName: \"kubernetes.io/projected/73665388-f46f-4939-b2eb-29cd02e173b2-kube-api-access-h7qq5\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.423051 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.423065 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73665388-f46f-4939-b2eb-29cd02e173b2-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.446911 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Sep 29 13:59:28 crc kubenswrapper[4869]: W0929 13:59:28.449553 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3b25f43_ab33_4712_ac4c_70cf4bba6ce2.slice/crio-0ab8353c357e60ca466a309357692bf3da68d189fb6ccd5d404bac9b5731cf3f WatchSource:0}: Error finding container 0ab8353c357e60ca466a309357692bf3da68d189fb6ccd5d404bac9b5731cf3f: Status 404 returned error can't find the container with id 0ab8353c357e60ca466a309357692bf3da68d189fb6ccd5d404bac9b5731cf3f Sep 29 13:59:28 crc kubenswrapper[4869]: W0929 13:59:28.453861 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12b51255_a7f5_4295_9367_e8198b8a5c55.slice/crio-ea60a24e497b6b65a6e13cfb3fba324043247390227b62afd923aaf9886180e0 WatchSource:0}: Error finding container ea60a24e497b6b65a6e13cfb3fba324043247390227b62afd923aaf9886180e0: Status 404 returned error can't find the container with id ea60a24e497b6b65a6e13cfb3fba324043247390227b62afd923aaf9886180e0 Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.455374 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Sep 29 13:59:28 crc kubenswrapper[4869]: W0929 13:59:28.457959 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6168192e_6336_436d_9883_f9608ade43dc.slice/crio-681d04b6d9f6be2e03037da4a3995ddc0eff4fa0a794786c25a02681c72bd48b WatchSource:0}: Error finding container 681d04b6d9f6be2e03037da4a3995ddc0eff4fa0a794786c25a02681c72bd48b: Status 404 returned error can't find the container with id 681d04b6d9f6be2e03037da4a3995ddc0eff4fa0a794786c25a02681c72bd48b Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.463966 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.753807 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2","Type":"ContainerStarted","Data":"0ab8353c357e60ca466a309357692bf3da68d189fb6ccd5d404bac9b5731cf3f"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.754796 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" event={"ID":"73665388-f46f-4939-b2eb-29cd02e173b2","Type":"ContainerDied","Data":"6e04ec12da91287da0093e04cf869c153d27e7c6fc193c99d5b8fb640e303d0a"} Sep 
29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.754873 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f8b56f9c-kkxw4" Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.757021 4869 generic.go:334] "Generic (PLEG): container finished" podID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerID="d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5" exitCode=0 Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.758018 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" event={"ID":"087de56b-f2f8-425d-9760-df2ec10ecd9c","Type":"ContainerDied","Data":"d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.759917 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6168192e-6336-436d-9883-f9608ade43dc","Type":"ContainerStarted","Data":"681d04b6d9f6be2e03037da4a3995ddc0eff4fa0a794786c25a02681c72bd48b"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.766486 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerStarted","Data":"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.769296 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"39fc25b3-44e9-413a-a50b-71655cb60e49","Type":"ContainerStarted","Data":"b349b7001523dfcec6a96cd078f1040c0101793ebb6454ddd29c6d4d56224753"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.770992 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"12b51255-a7f5-4295-9367-e8198b8a5c55","Type":"ContainerStarted","Data":"ea60a24e497b6b65a6e13cfb3fba324043247390227b62afd923aaf9886180e0"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.772814 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"0b77ed89-e796-4138-ae2c-fcd5f2125233","Type":"ContainerStarted","Data":"e3735329e4b0427828eeb3559bd49f912140d58e93caf291389a25c53a02a95a"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.773086 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"0b77ed89-e796-4138-ae2c-fcd5f2125233","Type":"ContainerStarted","Data":"c5aa420b937b194f825d44fc7a369694732091ffc812cd0a28c3905e93bab081"} Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.849116 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk"] Sep 29 13:59:28 crc kubenswrapper[4869]: I0929 13:59:28.858033 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.028350 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.051744 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.059383 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75f8b56f9c-kkxw4"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.289469 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.302648 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.444904 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config\") pod \"dc163c51-2805-4fe9-b547-35707a459808\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445221 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmds4\" (UniqueName: \"kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4\") pod \"dc163c51-2805-4fe9-b547-35707a459808\" (UID: \"dc163c51-2805-4fe9-b547-35707a459808\") " Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445298 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config" (OuterVolumeSpecName: "config") pod "dc163c51-2805-4fe9-b547-35707a459808" (UID: "dc163c51-2805-4fe9-b547-35707a459808"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445342 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc\") pod \"a6836230-3552-4f28-bda9-14e88ed79b41\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445386 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbvnt\" (UniqueName: \"kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt\") pod \"a6836230-3552-4f28-bda9-14e88ed79b41\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445513 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config\") pod \"a6836230-3552-4f28-bda9-14e88ed79b41\" (UID: \"a6836230-3552-4f28-bda9-14e88ed79b41\") " Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445881 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6836230-3552-4f28-bda9-14e88ed79b41" (UID: "a6836230-3552-4f28-bda9-14e88ed79b41"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.445930 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc163c51-2805-4fe9-b547-35707a459808-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.446290 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config" (OuterVolumeSpecName: "config") pod "a6836230-3552-4f28-bda9-14e88ed79b41" (UID: "a6836230-3552-4f28-bda9-14e88ed79b41"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.451120 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4" (OuterVolumeSpecName: "kube-api-access-tmds4") pod "dc163c51-2805-4fe9-b547-35707a459808" (UID: "dc163c51-2805-4fe9-b547-35707a459808"). InnerVolumeSpecName "kube-api-access-tmds4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.453939 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt" (OuterVolumeSpecName: "kube-api-access-kbvnt") pod "a6836230-3552-4f28-bda9-14e88ed79b41" (UID: "a6836230-3552-4f28-bda9-14e88ed79b41"). InnerVolumeSpecName "kube-api-access-kbvnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.548265 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.548305 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmds4\" (UniqueName: \"kubernetes.io/projected/dc163c51-2805-4fe9-b547-35707a459808-kube-api-access-tmds4\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.548323 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6836230-3552-4f28-bda9-14e88ed79b41-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.548338 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbvnt\" (UniqueName: \"kubernetes.io/projected/a6836230-3552-4f28-bda9-14e88ed79b41-kube-api-access-kbvnt\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.647367 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Sep 29 13:59:29 crc kubenswrapper[4869]: W0929 13:59:29.683400 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a91d438_85a7_4b76_ac91_509403d09f26.slice/crio-14bb46f88314d13be918b6f81491a9dd83c8718ffb419a2f3801d47dd32c7c13 WatchSource:0}: Error finding container 14bb46f88314d13be918b6f81491a9dd83c8718ffb419a2f3801d47dd32c7c13: Status 404 returned error can't find the container with id 14bb46f88314d13be918b6f81491a9dd83c8718ffb419a2f3801d47dd32c7c13 Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.757985 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4w2zq"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.788315 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" event={"ID":"087de56b-f2f8-425d-9760-df2ec10ecd9c","Type":"ContainerStarted","Data":"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.788875 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.797901 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"39ea57f0-7600-4ca4-912d-c429465aca86","Type":"ContainerStarted","Data":"522b8486b0ef9cdca4915c1530e685a39e22aef687f513397c66d318f34b5b6c"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.802408 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a91d438-85a7-4b76-ac91-509403d09f26","Type":"ContainerStarted","Data":"14bb46f88314d13be918b6f81491a9dd83c8718ffb419a2f3801d47dd32c7c13"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.808745 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm" event={"ID":"a6836230-3552-4f28-bda9-14e88ed79b41","Type":"ContainerDied","Data":"3b251c35444b5f34656d11efeb4b36b5856b50980fbb8da883fb23d2e994d704"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.808794 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65b98c69ff-2hdwm" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.812225 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" event={"ID":"dc163c51-2805-4fe9-b547-35707a459808","Type":"ContainerDied","Data":"5f9454af3a866dd35ce75b0b98090e2e2956e9c6953563e872b650e7ac81aca5"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.812247 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688fdbb689-m2vmp" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.812586 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" podStartSLOduration=12.460115437 podStartE2EDuration="24.812557754s" podCreationTimestamp="2025-09-29 13:59:05 +0000 UTC" firstStartedPulling="2025-09-29 13:59:15.615694562 +0000 UTC m=+1082.056338872" lastFinishedPulling="2025-09-29 13:59:27.968136869 +0000 UTC m=+1094.408781189" observedRunningTime="2025-09-29 13:59:29.807920063 +0000 UTC m=+1096.248564383" watchObservedRunningTime="2025-09-29 13:59:29.812557754 +0000 UTC m=+1096.253202084" Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.813319 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk" event={"ID":"6dbad8f0-0816-40ae-b0b4-d6602f352641","Type":"ContainerStarted","Data":"0ea401bdb184eb537b2e8ce79940bc171c740406df08cbf075f4a1201b67f9d5"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.814831 4869 generic.go:334] "Generic (PLEG): container finished" podID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerID="873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372" exitCode=0 Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.814897 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76597445-j4kl2" event={"ID":"47a3cbc5-94fc-419d-8a41-03d0273b8e61","Type":"ContainerDied","Data":"873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.818837 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerStarted","Data":"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.818915 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerStarted","Data":"e659e9616738ed223cc31bc285f2949d371c8d0a1f443cc010cf5c8d8ad9a394"} Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.868237 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.871972 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65b98c69ff-2hdwm"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.944708 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"] Sep 29 13:59:29 crc kubenswrapper[4869]: I0929 13:59:29.950228 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688fdbb689-m2vmp"] Sep 29 13:59:30 crc kubenswrapper[4869]: I0929 13:59:30.258415 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73665388-f46f-4939-b2eb-29cd02e173b2" path="/var/lib/kubelet/pods/73665388-f46f-4939-b2eb-29cd02e173b2/volumes" Sep 29 13:59:30 crc kubenswrapper[4869]: I0929 13:59:30.258848 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6836230-3552-4f28-bda9-14e88ed79b41" path="/var/lib/kubelet/pods/a6836230-3552-4f28-bda9-14e88ed79b41/volumes" Sep 29 13:59:30 crc kubenswrapper[4869]: I0929 13:59:30.259329 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc163c51-2805-4fe9-b547-35707a459808" path="/var/lib/kubelet/pods/dc163c51-2805-4fe9-b547-35707a459808/volumes" Sep 29 13:59:30 crc kubenswrapper[4869]: I0929 13:59:30.829172 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4w2zq" event={"ID":"c1f79c03-e945-4593-843b-9d9c5f893970","Type":"ContainerStarted","Data":"a4347b6165e1be538f89cea31f16504d0cf9d1d0c24a004ff4e1eb080014e255"} Sep 29 13:59:35 crc kubenswrapper[4869]: I0929 13:59:35.434794 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" Sep 29 13:59:35 crc kubenswrapper[4869]: I0929 13:59:35.487841 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"] Sep 29 13:59:36 crc kubenswrapper[4869]: I0929 13:59:36.873746 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76597445-j4kl2" event={"ID":"47a3cbc5-94fc-419d-8a41-03d0273b8e61","Type":"ContainerStarted","Data":"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8"} Sep 29 13:59:36 crc kubenswrapper[4869]: I0929 13:59:36.874294 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76597445-j4kl2" Sep 29 13:59:36 crc kubenswrapper[4869]: I0929 13:59:36.873959 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76597445-j4kl2" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="dnsmasq-dns" containerID="cri-o://d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8" gracePeriod=10 Sep 29 13:59:36 crc kubenswrapper[4869]: I0929 13:59:36.891666 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76597445-j4kl2" podStartSLOduration=-9223372003.963135 podStartE2EDuration="32.891640121s" podCreationTimestamp="2025-09-29 13:59:04 +0000 UTC" firstStartedPulling="2025-09-29 13:59:05.361746847 +0000 UTC m=+1071.802391167" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:59:36.891019365 +0000 
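UTC m=+1103.331663685" watchObservedRunningTime="2025-09-29 13:59:36.891640121 +0000 UTC m=+1103.332284441"

The podStartSLOduration=-9223372003.963135 in the entry just above is int64 overflow, not a measurement. In the healthy entries the tracker computes podStartSLOduration = podStartE2EDuration - (lastFinishedPulling - firstStartedPulling); for dnsmasq-dns-bf4c785d5-6ck9l above that is 24.812557754s - 12.352442317s = 12.460115437s, exactly as logged. Here the pulls were canceled (the ErrImagePull entries earlier), so lastFinishedPulling stayed at the zero time.Time (0001-01-01): the pull interval clamps to the minimum time.Duration and the outer subtraction wraps. A minimal sketch reproducing the exact value from the logged timestamps, assuming that formula rather than quoting kubelet's actual code:

```go
// Sketch: reproduce the impossible podStartSLOduration from the entry above.
// lastFinishedPulling is the zero time.Time, Sub clamps to the minimum
// time.Duration, and the final subtraction wraps int64.
package main

import (
	"fmt"
	"time"
)

func main() {
	firstStartedPulling := time.Date(2025, 9, 29, 13, 59, 5, 361746847, time.UTC)
	var lastFinishedPulling time.Time    // zero value: 0001-01-01 00:00:00 UTC
	e2e := 32891640121 * time.Nanosecond // podStartE2EDuration="32.891640121s"

	// time.Time.Sub returns the minimum Duration when the true difference
	// (here roughly -2024 years) is out of range.
	imagePull := lastFinishedPulling.Sub(firstStartedPulling)

	// Duration is an int64 of nanoseconds; e2e - MinInt64 overflows and wraps.
	slo := e2e - imagePull
	fmt.Printf("%.6f\n", slo.Seconds()) // -9223372003.963136 (log prints -9223372003.963135)
}
```
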
Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.748018 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76597445-j4kl2" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.882775 4869 generic.go:334] "Generic (PLEG): container finished" podID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerID="d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8" exitCode=0 Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.882839 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76597445-j4kl2" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.882862 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76597445-j4kl2" event={"ID":"47a3cbc5-94fc-419d-8a41-03d0273b8e61","Type":"ContainerDied","Data":"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8"} Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.884427 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76597445-j4kl2" event={"ID":"47a3cbc5-94fc-419d-8a41-03d0273b8e61","Type":"ContainerDied","Data":"108d1b9047d82e0e1d3d6d0d4541b42ab7210c862d95c1f53de2a59017caf56d"} Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.884453 4869 scope.go:117] "RemoveContainer" containerID="d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.886491 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2","Type":"ContainerStarted","Data":"6de05cb70f9d87fe52b4dfd9af1a84e20327741aea92ff2554f0e8690630d8b9"} Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.888498 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6168192e-6336-436d-9883-f9608ade43dc","Type":"ContainerStarted","Data":"c9fdf42e3c5a71c077212924304dcbdee288ed33ff7791e19be975b215eaddbd"} Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.889987 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.906682 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc\") pod \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.906783 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqm74\" (UniqueName: \"kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74\") pod \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.906849 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config\") pod \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\" (UID: \"47a3cbc5-94fc-419d-8a41-03d0273b8e61\") " Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.927736 4869 scope.go:117] "RemoveContainer" containerID="873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372" Sep 29 13:59:37 crc
kubenswrapper[4869]: I0929 13:59:37.959479 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74" (OuterVolumeSpecName: "kube-api-access-fqm74") pod "47a3cbc5-94fc-419d-8a41-03d0273b8e61" (UID: "47a3cbc5-94fc-419d-8a41-03d0273b8e61"). InnerVolumeSpecName "kube-api-access-fqm74". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.969601 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=21.157042079 podStartE2EDuration="28.969508471s" podCreationTimestamp="2025-09-29 13:59:09 +0000 UTC" firstStartedPulling="2025-09-29 13:59:28.462077471 +0000 UTC m=+1094.902721791" lastFinishedPulling="2025-09-29 13:59:36.274543863 +0000 UTC m=+1102.715188183" observedRunningTime="2025-09-29 13:59:37.916140473 +0000 UTC m=+1104.356784803" watchObservedRunningTime="2025-09-29 13:59:37.969508471 +0000 UTC m=+1104.410152811" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.983022 4869 scope.go:117] "RemoveContainer" containerID="d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8" Sep 29 13:59:37 crc kubenswrapper[4869]: E0929 13:59:37.984727 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8\": container with ID starting with d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8 not found: ID does not exist" containerID="d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.984773 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8"} err="failed to get container status \"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8\": rpc error: code = NotFound desc = could not find container \"d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8\": container with ID starting with d8317151c668b10a7447825e98d2d0c46179d2153ff6bad0c5403e5d884b05c8 not found: ID does not exist" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.984799 4869 scope.go:117] "RemoveContainer" containerID="873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372" Sep 29 13:59:37 crc kubenswrapper[4869]: E0929 13:59:37.985646 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372\": container with ID starting with 873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372 not found: ID does not exist" containerID="873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372" Sep 29 13:59:37 crc kubenswrapper[4869]: I0929 13:59:37.985709 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372"} err="failed to get container status \"873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372\": rpc error: code = NotFound desc = could not find container \"873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372\": container with ID starting with 873862b000d411758f59226ffec31ee4997a6acaf8d0d6c47cbe62bf82ab5372 not found: ID does not exist"
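
The RemoveContainer / NotFound pairs above are benign: by the time the kubelet asks the runtime for the container's status, CRI-O has already removed it, and a NotFound during cleanup is treated as the container being gone, which is the desired end state. A minimal sketch of that idempotent-delete pattern; the wrapper is hypothetical, while status and codes are the real google.golang.org/grpc helpers:

```go
// Sketch: the idempotent-delete pattern behind the "DeleteContainer
// returned error ... NotFound" entries above.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer swallows NotFound so a delete that races the runtime's
// own garbage collection still counts as success.
func removeContainer(remove func(id string) error, id string) error {
	err := remove(id)
	if status.Code(err) == codes.NotFound {
		return nil // already gone: desired state reached
	}
	return err
}

func main() {
	// Simulate the runtime answering NotFound, as in the entries above.
	criRemove := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	fmt.Println(removeContainer(criRemove, "d8317151c668")) // <nil>
}
```

Sep 29 13:59:38 crc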
kubenswrapper[4869]: I0929 13:59:38.009848 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqm74\" (UniqueName: \"kubernetes.io/projected/47a3cbc5-94fc-419d-8a41-03d0273b8e61-kube-api-access-fqm74\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.295420 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config" (OuterVolumeSpecName: "config") pod "47a3cbc5-94fc-419d-8a41-03d0273b8e61" (UID: "47a3cbc5-94fc-419d-8a41-03d0273b8e61"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.315010 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.352803 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "47a3cbc5-94fc-419d-8a41-03d0273b8e61" (UID: "47a3cbc5-94fc-419d-8a41-03d0273b8e61"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.416408 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47a3cbc5-94fc-419d-8a41-03d0273b8e61-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.513753 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"] Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.519817 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76597445-j4kl2"] Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.900176 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"39fc25b3-44e9-413a-a50b-71655cb60e49","Type":"ContainerStarted","Data":"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.901016 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.903417 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4w2zq" event={"ID":"c1f79c03-e945-4593-843b-9d9c5f893970","Type":"ContainerStarted","Data":"0358bf07b452b1f693d63893f156daa50c38f5aceebf1c5dbf718d0f0b512772"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.906461 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk" event={"ID":"6dbad8f0-0816-40ae-b0b4-d6602f352641","Type":"ContainerStarted","Data":"3615e0ae47b2869d8440497780afcf2445195fabc3ca8ccd8344156e5bfcbbef"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.907225 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-jhwdk" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.910796 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"12b51255-a7f5-4295-9367-e8198b8a5c55","Type":"ContainerStarted","Data":"3511b7041497df08d22180b83d99fb29c526f3f9a38399f12479cfd35e9eb846"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 
13:59:38.912449 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39ea57f0-7600-4ca4-912d-c429465aca86","Type":"ContainerStarted","Data":"4c768d4034ffce6169e0d1fd2140f15f5b2757bd620ff519d321bebb9dd86300"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.917186 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a91d438-85a7-4b76-ac91-509403d09f26","Type":"ContainerStarted","Data":"d2dd9f02ff8b50efe190852f6bbff026ce57724d08d142d9005f1a18646892af"} Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.924216 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=18.462445703 podStartE2EDuration="27.924197957s" podCreationTimestamp="2025-09-29 13:59:11 +0000 UTC" firstStartedPulling="2025-09-29 13:59:27.8072436 +0000 UTC m=+1094.247887920" lastFinishedPulling="2025-09-29 13:59:37.268995854 +0000 UTC m=+1103.709640174" observedRunningTime="2025-09-29 13:59:38.917055261 +0000 UTC m=+1105.357699581" watchObservedRunningTime="2025-09-29 13:59:38.924197957 +0000 UTC m=+1105.364842277" Sep 29 13:59:38 crc kubenswrapper[4869]: I0929 13:59:38.943510 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-jhwdk" podStartSLOduration=17.309225878 podStartE2EDuration="24.943479568s" podCreationTimestamp="2025-09-29 13:59:14 +0000 UTC" firstStartedPulling="2025-09-29 13:59:28.889407632 +0000 UTC m=+1095.330051952" lastFinishedPulling="2025-09-29 13:59:36.523661322 +0000 UTC m=+1102.964305642" observedRunningTime="2025-09-29 13:59:38.939219088 +0000 UTC m=+1105.379863418" watchObservedRunningTime="2025-09-29 13:59:38.943479568 +0000 UTC m=+1105.384123888" Sep 29 13:59:39 crc kubenswrapper[4869]: I0929 13:59:39.926846 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerStarted","Data":"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af"} Sep 29 13:59:39 crc kubenswrapper[4869]: I0929 13:59:39.928536 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4w2zq" event={"ID":"c1f79c03-e945-4593-843b-9d9c5f893970","Type":"ContainerDied","Data":"0358bf07b452b1f693d63893f156daa50c38f5aceebf1c5dbf718d0f0b512772"} Sep 29 13:59:39 crc kubenswrapper[4869]: I0929 13:59:39.928601 4869 generic.go:334] "Generic (PLEG): container finished" podID="c1f79c03-e945-4593-843b-9d9c5f893970" containerID="0358bf07b452b1f693d63893f156daa50c38f5aceebf1c5dbf718d0f0b512772" exitCode=0 Sep 29 13:59:40 crc kubenswrapper[4869]: I0929 13:59:40.253901 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" path="/var/lib/kubelet/pods/47a3cbc5-94fc-419d-8a41-03d0273b8e61/volumes" Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.948259 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4w2zq" event={"ID":"c1f79c03-e945-4593-843b-9d9c5f893970","Type":"ContainerStarted","Data":"b03184ff4a7f3e97dac17924ab46543c8a85e5779871ce90deaf1453a8dcaf7c"} Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.950506 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.950602 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-4w2zq" event={"ID":"c1f79c03-e945-4593-843b-9d9c5f893970","Type":"ContainerStarted","Data":"e6dc574725b9f89eddbd6abcf19ac3889d8594fa573d4b5117872f15bb73cde1"} Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.950881 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39ea57f0-7600-4ca4-912d-c429465aca86","Type":"ContainerStarted","Data":"8bda074f256dca5357719b71d5ddfb0734f83b104de26435e601ccd49c791d2d"} Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.954505 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a91d438-85a7-4b76-ac91-509403d09f26","Type":"ContainerStarted","Data":"1f85de817e4bc5ad87df83b059f5ae2d34b7ec3d1981a5ed4f4e99e51ec1eec8"} Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.974895 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-4w2zq" podStartSLOduration=21.893867104 podStartE2EDuration="27.974865469s" podCreationTimestamp="2025-09-29 13:59:14 +0000 UTC" firstStartedPulling="2025-09-29 13:59:30.282168163 +0000 UTC m=+1096.722812483" lastFinishedPulling="2025-09-29 13:59:36.363166518 +0000 UTC m=+1102.803810848" observedRunningTime="2025-09-29 13:59:41.969241612 +0000 UTC m=+1108.409885972" watchObservedRunningTime="2025-09-29 13:59:41.974865469 +0000 UTC m=+1108.415509799" Sep 29 13:59:41 crc kubenswrapper[4869]: I0929 13:59:41.993338 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=16.825010085 podStartE2EDuration="27.993317268s" podCreationTimestamp="2025-09-29 13:59:14 +0000 UTC" firstStartedPulling="2025-09-29 13:59:29.686419817 +0000 UTC m=+1096.127064127" lastFinishedPulling="2025-09-29 13:59:40.85472699 +0000 UTC m=+1107.295371310" observedRunningTime="2025-09-29 13:59:41.992565909 +0000 UTC m=+1108.433210249" watchObservedRunningTime="2025-09-29 13:59:41.993317268 +0000 UTC m=+1108.433961588" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.021360 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=13.183591193 podStartE2EDuration="25.021337677s" podCreationTimestamp="2025-09-29 13:59:17 +0000 UTC" firstStartedPulling="2025-09-29 13:59:29.033356182 +0000 UTC m=+1095.474000502" lastFinishedPulling="2025-09-29 13:59:40.871102666 +0000 UTC m=+1107.311746986" observedRunningTime="2025-09-29 13:59:42.017949419 +0000 UTC m=+1108.458593739" watchObservedRunningTime="2025-09-29 13:59:42.021337677 +0000 UTC m=+1108.461981997" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.643062 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.687915 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.961603 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.961995 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:42 crc kubenswrapper[4869]: I0929 13:59:42.997203 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 
13:59:43.175791 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.218803 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.259625 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:43 crc kubenswrapper[4869]: E0929 13:59:43.260086 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="init" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.260111 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="init" Sep 29 13:59:43 crc kubenswrapper[4869]: E0929 13:59:43.260140 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="dnsmasq-dns" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.260149 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="dnsmasq-dns" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.260322 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="47a3cbc5-94fc-419d-8a41-03d0273b8e61" containerName="dnsmasq-dns" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.261515 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.278076 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.289751 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.393087 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wgzgq"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.394341 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.398491 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.409996 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.410059 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq9ht\" (UniqueName: \"kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.410090 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.410176 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.416218 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wgzgq"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.511641 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khvkq\" (UniqueName: \"kubernetes.io/projected/6489e730-0811-4b9e-a82c-a36987e0db21-kube-api-access-khvkq\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.511710 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.511740 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.511776 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6489e730-0811-4b9e-a82c-a36987e0db21-config\") pod \"ovn-controller-metrics-wgzgq\" (UID: 
\"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512382 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovs-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512490 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512562 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq9ht\" (UniqueName: \"kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512622 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512740 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-combined-ca-bundle\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.512807 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovn-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.513124 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.513791 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.514025 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " 
pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.550519 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq9ht\" (UniqueName: \"kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht\") pod \"dnsmasq-dns-7d5db99669-kn4gh\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.600185 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618159 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khvkq\" (UniqueName: \"kubernetes.io/projected/6489e730-0811-4b9e-a82c-a36987e0db21-kube-api-access-khvkq\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618231 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618280 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6489e730-0811-4b9e-a82c-a36987e0db21-config\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618333 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovs-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618410 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-combined-ca-bundle\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618436 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovn-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.618824 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovn-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.619698 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/6489e730-0811-4b9e-a82c-a36987e0db21-ovs-rundir\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.620294 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6489e730-0811-4b9e-a82c-a36987e0db21-config\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.639205 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.641127 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khvkq\" (UniqueName: \"kubernetes.io/projected/6489e730-0811-4b9e-a82c-a36987e0db21-kube-api-access-khvkq\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.643093 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6489e730-0811-4b9e-a82c-a36987e0db21-combined-ca-bundle\") pod \"ovn-controller-metrics-wgzgq\" (UID: \"6489e730-0811-4b9e-a82c-a36987e0db21\") " pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.716387 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wgzgq" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.728443 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.752300 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.754998 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.761092 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.766449 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.825438 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.825483 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.825502 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.825561 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rvrx\" (UniqueName: \"kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.829905 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.931338 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.931419 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.931456 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " 
pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.931472 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.931512 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rvrx\" (UniqueName: \"kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.933021 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.933124 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.933587 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.933676 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.951752 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rvrx\" (UniqueName: \"kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx\") pod \"dnsmasq-dns-7fd9c84cb5-7fcrr\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.975929 4869 generic.go:334] "Generic (PLEG): container finished" podID="d3b25f43-ab33-4712-ac4c-70cf4bba6ce2" containerID="6de05cb70f9d87fe52b4dfd9af1a84e20327741aea92ff2554f0e8690630d8b9" exitCode=0 Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.976006 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2","Type":"ContainerDied","Data":"6de05cb70f9d87fe52b4dfd9af1a84e20327741aea92ff2554f0e8690630d8b9"} Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.980770 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"12b51255-a7f5-4295-9367-e8198b8a5c55","Type":"ContainerDied","Data":"3511b7041497df08d22180b83d99fb29c526f3f9a38399f12479cfd35e9eb846"} Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.981343 4869 generic.go:334] "Generic (PLEG): container finished" podID="12b51255-a7f5-4295-9367-e8198b8a5c55" containerID="3511b7041497df08d22180b83d99fb29c526f3f9a38399f12479cfd35e9eb846" exitCode=0 Sep 29 13:59:43 crc kubenswrapper[4869]: I0929 13:59:43.982454 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.046549 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.123993 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.205387 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.276700 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.278481 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.288903 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-9nm79" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.289116 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.289245 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.289362 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.304172 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.319387 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wgzgq"] Sep 29 13:59:44 crc kubenswrapper[4869]: W0929 13:59:44.328285 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6489e730_0811_4b9e_a82c_a36987e0db21.slice/crio-f356ca1321953e998cc0eff665eb25387b839c1ad9fc8f7505929ef53e88f6e8 WatchSource:0}: Error finding container f356ca1321953e998cc0eff665eb25387b839c1ad9fc8f7505929ef53e88f6e8: Status 404 returned error can't find the container with id f356ca1321953e998cc0eff665eb25387b839c1ad9fc8f7505929ef53e88f6e8 Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445399 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-config\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445795 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/1c485924-251a-4fdb-9aef-d32332da2662-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445852 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445891 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445929 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-scripts\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.445979 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqrs4\" (UniqueName: \"kubernetes.io/projected/1c485924-251a-4fdb-9aef-d32332da2662-kube-api-access-wqrs4\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.446041 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547455 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqrs4\" (UniqueName: \"kubernetes.io/projected/1c485924-251a-4fdb-9aef-d32332da2662-kube-api-access-wqrs4\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547531 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547598 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-config\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547666 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c485924-251a-4fdb-9aef-d32332da2662-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: 
I0929 13:59:44.547713 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547752 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.547790 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-scripts\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.548413 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c485924-251a-4fdb-9aef-d32332da2662-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.549105 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-config\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.549346 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c485924-251a-4fdb-9aef-d32332da2662-scripts\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.555196 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.555402 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.558130 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c485924-251a-4fdb-9aef-d32332da2662-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.571276 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqrs4\" (UniqueName: \"kubernetes.io/projected/1c485924-251a-4fdb-9aef-d32332da2662-kube-api-access-wqrs4\") pod \"ovn-northd-0\" (UID: \"1c485924-251a-4fdb-9aef-d32332da2662\") " pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.623424 4869 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Sep 29 13:59:44 crc kubenswrapper[4869]: I0929 13:59:44.695927 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 13:59:44 crc kubenswrapper[4869]: W0929 13:59:44.719932 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00d07511_b138_4075_8d8a_5b14a27917fd.slice/crio-7c30c213b809158afa950ac54014e7db1370f526c91aba8c0616d618c78f9ce9 WatchSource:0}: Error finding container 7c30c213b809158afa950ac54014e7db1370f526c91aba8c0616d618c78f9ce9: Status 404 returned error can't find the container with id 7c30c213b809158afa950ac54014e7db1370f526c91aba8c0616d618c78f9ce9 Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.006344 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3b25f43-ab33-4712-ac4c-70cf4bba6ce2","Type":"ContainerStarted","Data":"1fdc681ad1370693b0e706aa247db2b676672460a05a173efbda38115ca663a6"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.015580 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wgzgq" event={"ID":"6489e730-0811-4b9e-a82c-a36987e0db21","Type":"ContainerStarted","Data":"ab32714c22d667750007fc8c3af1f712f2b0154765c1a18e6d563ae91f12f30d"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.015690 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wgzgq" event={"ID":"6489e730-0811-4b9e-a82c-a36987e0db21","Type":"ContainerStarted","Data":"f356ca1321953e998cc0eff665eb25387b839c1ad9fc8f7505929ef53e88f6e8"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.018727 4869 generic.go:334] "Generic (PLEG): container finished" podID="5014357d-bf5b-49e2-9d7c-9baffaf3abbb" containerID="7cd89afd9edac34ae73c098f9d63d2e7d12e96e412ce2fdd8f15e6304b6bf809" exitCode=0 Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.018843 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" event={"ID":"5014357d-bf5b-49e2-9d7c-9baffaf3abbb","Type":"ContainerDied","Data":"7cd89afd9edac34ae73c098f9d63d2e7d12e96e412ce2fdd8f15e6304b6bf809"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.018909 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" event={"ID":"5014357d-bf5b-49e2-9d7c-9baffaf3abbb","Type":"ContainerStarted","Data":"1125fbb95bba852e6da67a8a3af889a1d073b424115b40b9ef34ef91a292d430"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.026930 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"12b51255-a7f5-4295-9367-e8198b8a5c55","Type":"ContainerStarted","Data":"4ff32015ac278d8b964d27f11599c0e4f60e92dd0ecfcce172cc212fd64053b6"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.032188 4869 generic.go:334] "Generic (PLEG): container finished" podID="00d07511-b138-4075-8d8a-5b14a27917fd" containerID="d3ee13a0c408b25d95fa68140d328168a817eaa5069987899686955cbd20a002" exitCode=0 Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.032707 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=29.052537034 podStartE2EDuration="37.032689627s" podCreationTimestamp="2025-09-29 13:59:08 +0000 UTC" firstStartedPulling="2025-09-29 13:59:28.452232455 +0000 UTC 
m=+1094.892876775" lastFinishedPulling="2025-09-29 13:59:36.432385048 +0000 UTC m=+1102.873029368" observedRunningTime="2025-09-29 13:59:45.026089456 +0000 UTC m=+1111.466733776" watchObservedRunningTime="2025-09-29 13:59:45.032689627 +0000 UTC m=+1111.473333947" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.033145 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" event={"ID":"00d07511-b138-4075-8d8a-5b14a27917fd","Type":"ContainerDied","Data":"d3ee13a0c408b25d95fa68140d328168a817eaa5069987899686955cbd20a002"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.033215 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" event={"ID":"00d07511-b138-4075-8d8a-5b14a27917fd","Type":"ContainerStarted","Data":"7c30c213b809158afa950ac54014e7db1370f526c91aba8c0616d618c78f9ce9"} Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.064319 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wgzgq" podStartSLOduration=2.064296869 podStartE2EDuration="2.064296869s" podCreationTimestamp="2025-09-29 13:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:59:45.054441643 +0000 UTC m=+1111.495085963" watchObservedRunningTime="2025-09-29 13:59:45.064296869 +0000 UTC m=+1111.504941189" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.090172 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=29.843439167 podStartE2EDuration="38.090151002s" podCreationTimestamp="2025-09-29 13:59:07 +0000 UTC" firstStartedPulling="2025-09-29 13:59:28.455213613 +0000 UTC m=+1094.895857933" lastFinishedPulling="2025-09-29 13:59:36.701925448 +0000 UTC m=+1103.142569768" observedRunningTime="2025-09-29 13:59:45.081110127 +0000 UTC m=+1111.521754447" watchObservedRunningTime="2025-09-29 13:59:45.090151002 +0000 UTC m=+1111.530795322" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.141886 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.186481 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.371508 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.472941 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc\") pod \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.473085 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq9ht\" (UniqueName: \"kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht\") pod \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.473122 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb\") pod \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.473243 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config\") pod \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\" (UID: \"5014357d-bf5b-49e2-9d7c-9baffaf3abbb\") " Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.477206 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht" (OuterVolumeSpecName: "kube-api-access-sq9ht") pod "5014357d-bf5b-49e2-9d7c-9baffaf3abbb" (UID: "5014357d-bf5b-49e2-9d7c-9baffaf3abbb"). InnerVolumeSpecName "kube-api-access-sq9ht". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.502769 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config" (OuterVolumeSpecName: "config") pod "5014357d-bf5b-49e2-9d7c-9baffaf3abbb" (UID: "5014357d-bf5b-49e2-9d7c-9baffaf3abbb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.503244 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5014357d-bf5b-49e2-9d7c-9baffaf3abbb" (UID: "5014357d-bf5b-49e2-9d7c-9baffaf3abbb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.519763 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5014357d-bf5b-49e2-9d7c-9baffaf3abbb" (UID: "5014357d-bf5b-49e2-9d7c-9baffaf3abbb"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.575236 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.575269 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq9ht\" (UniqueName: \"kubernetes.io/projected/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-kube-api-access-sq9ht\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.575281 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:45 crc kubenswrapper[4869]: I0929 13:59:45.575293 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5014357d-bf5b-49e2-9d7c-9baffaf3abbb-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.041132 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" event={"ID":"00d07511-b138-4075-8d8a-5b14a27917fd","Type":"ContainerStarted","Data":"e4cf54ec200f1345674425f82e0650cc96afed8ec9ca93aa67d312685afc2b8a"} Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.041440 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.043260 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c485924-251a-4fdb-9aef-d32332da2662","Type":"ContainerStarted","Data":"c8444ee068fa66adc10c8bca83bf3d51c609d2bccd6cf85b23e2c8ea14a82dc4"} Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.048280 4869 generic.go:334] "Generic (PLEG): container finished" podID="10a66559-191f-464a-8095-efb79cc5b29f" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" exitCode=0 Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.048349 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerDied","Data":"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af"} Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.052122 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" event={"ID":"5014357d-bf5b-49e2-9d7c-9baffaf3abbb","Type":"ContainerDied","Data":"1125fbb95bba852e6da67a8a3af889a1d073b424115b40b9ef34ef91a292d430"} Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.052184 4869 scope.go:117] "RemoveContainer" containerID="7cd89afd9edac34ae73c098f9d63d2e7d12e96e412ce2fdd8f15e6304b6bf809" Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.052286 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d5db99669-kn4gh" Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.062855 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" podStartSLOduration=3.062839855 podStartE2EDuration="3.062839855s" podCreationTimestamp="2025-09-29 13:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 13:59:46.062327362 +0000 UTC m=+1112.502971682" watchObservedRunningTime="2025-09-29 13:59:46.062839855 +0000 UTC m=+1112.503484175" Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.130704 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.152125 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d5db99669-kn4gh"] Sep 29 13:59:46 crc kubenswrapper[4869]: I0929 13:59:46.253684 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5014357d-bf5b-49e2-9d7c-9baffaf3abbb" path="/var/lib/kubelet/pods/5014357d-bf5b-49e2-9d7c-9baffaf3abbb/volumes" Sep 29 13:59:47 crc kubenswrapper[4869]: I0929 13:59:47.063441 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c485924-251a-4fdb-9aef-d32332da2662","Type":"ContainerStarted","Data":"923cae925dc31ce28e0b4a5ae79f8c8c11fc3c0f3c206a11392236015e322d27"} Sep 29 13:59:47 crc kubenswrapper[4869]: I0929 13:59:47.063834 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Sep 29 13:59:47 crc kubenswrapper[4869]: I0929 13:59:47.063869 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c485924-251a-4fdb-9aef-d32332da2662","Type":"ContainerStarted","Data":"2e4caf17d33eb06d690830bbe8a6574f2e6873f3c7a6f419587eb439676ec756"} Sep 29 13:59:47 crc kubenswrapper[4869]: I0929 13:59:47.086201 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.335467256 podStartE2EDuration="3.086184997s" podCreationTimestamp="2025-09-29 13:59:44 +0000 UTC" firstStartedPulling="2025-09-29 13:59:45.199721491 +0000 UTC m=+1111.640365811" lastFinishedPulling="2025-09-29 13:59:45.950439232 +0000 UTC m=+1112.391083552" observedRunningTime="2025-09-29 13:59:47.085854559 +0000 UTC m=+1113.526498879" watchObservedRunningTime="2025-09-29 13:59:47.086184997 +0000 UTC m=+1113.526829307" Sep 29 13:59:48 crc kubenswrapper[4869]: I0929 13:59:48.532929 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Sep 29 13:59:48 crc kubenswrapper[4869]: I0929 13:59:48.532986 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Sep 29 13:59:49 crc kubenswrapper[4869]: I0929 13:59:49.484472 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:49 crc kubenswrapper[4869]: I0929 13:59:49.484866 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:50 crc kubenswrapper[4869]: I0929 13:59:50.128678 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Sep 29 13:59:50 crc kubenswrapper[4869]: I0929 13:59:50.210287 4869 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Sep 29 13:59:50 crc kubenswrapper[4869]: I0929 13:59:50.657019 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 13:59:50 crc kubenswrapper[4869]: I0929 13:59:50.657334 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.549989 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.612201 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.743482 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.819374 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-5frp9"] Sep 29 13:59:51 crc kubenswrapper[4869]: E0929 13:59:51.819813 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5014357d-bf5b-49e2-9d7c-9baffaf3abbb" containerName="init" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.819830 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5014357d-bf5b-49e2-9d7c-9baffaf3abbb" containerName="init" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.820021 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5014357d-bf5b-49e2-9d7c-9baffaf3abbb" containerName="init" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.820726 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.835024 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-5frp9"] Sep 29 13:59:51 crc kubenswrapper[4869]: I0929 13:59:51.921872 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzt7f\" (UniqueName: \"kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f\") pod \"watcher-db-create-5frp9\" (UID: \"131d94d1-1122-44ab-99b9-ca4b0801beac\") " pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:52 crc kubenswrapper[4869]: I0929 13:59:52.023387 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzt7f\" (UniqueName: \"kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f\") pod \"watcher-db-create-5frp9\" (UID: \"131d94d1-1122-44ab-99b9-ca4b0801beac\") " pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:52 crc kubenswrapper[4869]: I0929 13:59:52.043239 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzt7f\" (UniqueName: \"kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f\") pod \"watcher-db-create-5frp9\" (UID: \"131d94d1-1122-44ab-99b9-ca4b0801beac\") " pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:52 crc kubenswrapper[4869]: I0929 13:59:52.143768 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:53 crc kubenswrapper[4869]: I0929 13:59:53.330011 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-5frp9"] Sep 29 13:59:53 crc kubenswrapper[4869]: W0929 13:59:53.332027 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod131d94d1_1122_44ab_99b9_ca4b0801beac.slice/crio-01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039 WatchSource:0}: Error finding container 01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039: Status 404 returned error can't find the container with id 01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039 Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.125812 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.129095 4869 generic.go:334] "Generic (PLEG): container finished" podID="131d94d1-1122-44ab-99b9-ca4b0801beac" containerID="ebbe9aee078ad6abbbd5bfcb85e663b5e65f7015187c693376a07430219644ab" exitCode=0 Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.129147 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-5frp9" event={"ID":"131d94d1-1122-44ab-99b9-ca4b0801beac","Type":"ContainerDied","Data":"ebbe9aee078ad6abbbd5bfcb85e663b5e65f7015187c693376a07430219644ab"} Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.129200 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-5frp9" event={"ID":"131d94d1-1122-44ab-99b9-ca4b0801beac","Type":"ContainerStarted","Data":"01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039"} Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.218639 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"] Sep 29 13:59:54 crc kubenswrapper[4869]: 
I0929 13:59:54.218953 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="dnsmasq-dns" containerID="cri-o://b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42" gracePeriod=10 Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.747819 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.872551 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc\") pod \"087de56b-f2f8-425d-9760-df2ec10ecd9c\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.872978 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config\") pod \"087de56b-f2f8-425d-9760-df2ec10ecd9c\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.873038 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8wtg\" (UniqueName: \"kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg\") pod \"087de56b-f2f8-425d-9760-df2ec10ecd9c\" (UID: \"087de56b-f2f8-425d-9760-df2ec10ecd9c\") " Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.878791 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg" (OuterVolumeSpecName: "kube-api-access-r8wtg") pod "087de56b-f2f8-425d-9760-df2ec10ecd9c" (UID: "087de56b-f2f8-425d-9760-df2ec10ecd9c"). InnerVolumeSpecName "kube-api-access-r8wtg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.924925 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config" (OuterVolumeSpecName: "config") pod "087de56b-f2f8-425d-9760-df2ec10ecd9c" (UID: "087de56b-f2f8-425d-9760-df2ec10ecd9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.931916 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "087de56b-f2f8-425d-9760-df2ec10ecd9c" (UID: "087de56b-f2f8-425d-9760-df2ec10ecd9c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.975632 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.975669 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/087de56b-f2f8-425d-9760-df2ec10ecd9c-config\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:54 crc kubenswrapper[4869]: I0929 13:59:54.975684 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8wtg\" (UniqueName: \"kubernetes.io/projected/087de56b-f2f8-425d-9760-df2ec10ecd9c-kube-api-access-r8wtg\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.141600 4869 generic.go:334] "Generic (PLEG): container finished" podID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerID="b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42" exitCode=0 Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.141686 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.141685 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" event={"ID":"087de56b-f2f8-425d-9760-df2ec10ecd9c","Type":"ContainerDied","Data":"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42"} Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.141768 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf4c785d5-6ck9l" event={"ID":"087de56b-f2f8-425d-9760-df2ec10ecd9c","Type":"ContainerDied","Data":"394cd4712c9cbc252a077db0f9849ec9e9e0906be8413593ac489dd479064a08"} Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.141803 4869 scope.go:117] "RemoveContainer" containerID="b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.145161 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerStarted","Data":"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d"} Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.165505 4869 scope.go:117] "RemoveContainer" containerID="d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.190102 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"] Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.200116 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bf4c785d5-6ck9l"] Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.203762 4869 scope.go:117] "RemoveContainer" containerID="b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42" Sep 29 13:59:55 crc kubenswrapper[4869]: E0929 13:59:55.204458 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42\": container with ID starting with b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42 not found: ID does not exist" containerID="b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42" 
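The two NotFound errors above are benign: the kubelet asks CRI-O for the status of containers it has just removed, gets "ID does not exist" back, and treats the missing container as already deleted before continuing. Readers post-processing this artifact should also note that the dump is hard-wrapped mid-entry; the sketch below is not part of the captured log and only the standard library is used — the file name "kubelet.log" and the journald header pattern are assumptions read off the lines shown here — but it illustrates one way to re-split the text into one journal entry per line so a single pod or container ID can be grepped per line.

package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

func main() {
	// Read the raw artifact; "kubelet.log" is an assumed path for this dump.
	raw, err := os.ReadFile("kubelet.log")
	if err != nil {
		panic(err)
	}
	// Every entry in this dump begins with a journald header such as
	// "Sep 29 13:59:55 crc kubenswrapper[4869]: ".
	header := regexp.MustCompile(`[A-Z][a-z]{2} [ \d]\d \d{2}:\d{2}:\d{2} \S+ kubenswrapper\[\d+\]: `)
	locs := header.FindAllIndex(raw, -1)
	// Emit each entry from one header to the start of the next, collapsing
	// the stray wrap-induced newlines inside it; any fragment before the
	// first header (a wrapped tail of an earlier entry) is skipped.
	for i, loc := range locs {
		end := len(raw)
		if i+1 < len(locs) {
			end = locs[i+1][0]
		}
		entry := strings.TrimSpace(strings.ReplaceAll(string(raw[loc[0]:end]), "\n", " "))
		fmt.Println(entry)
	}
}

Run under these assumptions (e.g. go run split.go > kubelet-split.log), this keeps the escaped quotes and structured key=value fields of each klog line verbatim, so the timestamps and pod UIDs above remain byte-for-byte intact.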
Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.204536 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42"} err="failed to get container status \"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42\": rpc error: code = NotFound desc = could not find container \"b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42\": container with ID starting with b27838a79ab636693ac573b0402476f8302fdfd5fc54201e33952658a48dfc42 not found: ID does not exist" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.204586 4869 scope.go:117] "RemoveContainer" containerID="d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5" Sep 29 13:59:55 crc kubenswrapper[4869]: E0929 13:59:55.204917 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5\": container with ID starting with d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5 not found: ID does not exist" containerID="d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.204950 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5"} err="failed to get container status \"d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5\": rpc error: code = NotFound desc = could not find container \"d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5\": container with ID starting with d1c066b0a59a8633178d7598e6e6760d6b3c73b63a67a8162b0f58b4733b95c5 not found: ID does not exist" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.303979 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-zmpk5"] Sep 29 13:59:55 crc kubenswrapper[4869]: E0929 13:59:55.304487 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="init" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.304507 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="init" Sep 29 13:59:55 crc kubenswrapper[4869]: E0929 13:59:55.304525 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="dnsmasq-dns" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.304534 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="dnsmasq-dns" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.304801 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" containerName="dnsmasq-dns" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.305978 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.316243 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zmpk5"] Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.393576 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlkhd\" (UniqueName: \"kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd\") pod \"glance-db-create-zmpk5\" (UID: \"8e8902bc-102d-40ac-b875-891302ebd0fb\") " pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.494802 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlkhd\" (UniqueName: \"kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd\") pod \"glance-db-create-zmpk5\" (UID: \"8e8902bc-102d-40ac-b875-891302ebd0fb\") " pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.523879 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlkhd\" (UniqueName: \"kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd\") pod \"glance-db-create-zmpk5\" (UID: \"8e8902bc-102d-40ac-b875-891302ebd0fb\") " pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.591070 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.632243 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.697249 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzt7f\" (UniqueName: \"kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f\") pod \"131d94d1-1122-44ab-99b9-ca4b0801beac\" (UID: \"131d94d1-1122-44ab-99b9-ca4b0801beac\") " Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.702275 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f" (OuterVolumeSpecName: "kube-api-access-jzt7f") pod "131d94d1-1122-44ab-99b9-ca4b0801beac" (UID: "131d94d1-1122-44ab-99b9-ca4b0801beac"). InnerVolumeSpecName "kube-api-access-jzt7f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:55 crc kubenswrapper[4869]: I0929 13:59:55.800286 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzt7f\" (UniqueName: \"kubernetes.io/projected/131d94d1-1122-44ab-99b9-ca4b0801beac-kube-api-access-jzt7f\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:56 crc kubenswrapper[4869]: I0929 13:59:56.048608 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zmpk5"] Sep 29 13:59:56 crc kubenswrapper[4869]: I0929 13:59:56.161960 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-5frp9" event={"ID":"131d94d1-1122-44ab-99b9-ca4b0801beac","Type":"ContainerDied","Data":"01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039"} Sep 29 13:59:56 crc kubenswrapper[4869]: I0929 13:59:56.162316 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01ce07bad4df0f0344ee19a4f924b864f8df7e1bf81dbde936d8da1a13025039" Sep 29 13:59:56 crc kubenswrapper[4869]: I0929 13:59:56.161999 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-5frp9" Sep 29 13:59:56 crc kubenswrapper[4869]: W0929 13:59:56.196197 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e8902bc_102d_40ac_b875_891302ebd0fb.slice/crio-2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481 WatchSource:0}: Error finding container 2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481: Status 404 returned error can't find the container with id 2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481 Sep 29 13:59:56 crc kubenswrapper[4869]: I0929 13:59:56.263363 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="087de56b-f2f8-425d-9760-df2ec10ecd9c" path="/var/lib/kubelet/pods/087de56b-f2f8-425d-9760-df2ec10ecd9c/volumes" Sep 29 13:59:57 crc kubenswrapper[4869]: I0929 13:59:57.172076 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zmpk5" event={"ID":"8e8902bc-102d-40ac-b875-891302ebd0fb","Type":"ContainerStarted","Data":"2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481"} Sep 29 13:59:58 crc kubenswrapper[4869]: I0929 13:59:58.180684 4869 generic.go:334] "Generic (PLEG): container finished" podID="8e8902bc-102d-40ac-b875-891302ebd0fb" containerID="1fbbcb9dfe347b34253e56ee4a06027b0b8b34b909a9c0562503725e1b2b608d" exitCode=0 Sep 29 13:59:58 crc kubenswrapper[4869]: I0929 13:59:58.180780 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zmpk5" event={"ID":"8e8902bc-102d-40ac-b875-891302ebd0fb","Type":"ContainerDied","Data":"1fbbcb9dfe347b34253e56ee4a06027b0b8b34b909a9c0562503725e1b2b608d"} Sep 29 13:59:58 crc kubenswrapper[4869]: I0929 13:59:58.185094 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerStarted","Data":"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8"} Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.197239 4869 generic.go:334] "Generic (PLEG): container finished" podID="0b77ed89-e796-4138-ae2c-fcd5f2125233" containerID="e3735329e4b0427828eeb3559bd49f912140d58e93caf291389a25c53a02a95a" exitCode=0 Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.197318 4869 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"0b77ed89-e796-4138-ae2c-fcd5f2125233","Type":"ContainerDied","Data":"e3735329e4b0427828eeb3559bd49f912140d58e93caf291389a25c53a02a95a"} Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.200228 4869 generic.go:334] "Generic (PLEG): container finished" podID="0d97e3c5-9850-428b-9d88-89307901912d" containerID="de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082" exitCode=0 Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.200347 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerDied","Data":"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082"} Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.577215 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zmpk5" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.669330 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlkhd\" (UniqueName: \"kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd\") pod \"8e8902bc-102d-40ac-b875-891302ebd0fb\" (UID: \"8e8902bc-102d-40ac-b875-891302ebd0fb\") " Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.674742 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd" (OuterVolumeSpecName: "kube-api-access-tlkhd") pod "8e8902bc-102d-40ac-b875-891302ebd0fb" (UID: "8e8902bc-102d-40ac-b875-891302ebd0fb"). InnerVolumeSpecName "kube-api-access-tlkhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.682760 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.771509 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlkhd\" (UniqueName: \"kubernetes.io/projected/8e8902bc-102d-40ac-b875-891302ebd0fb-kube-api-access-tlkhd\") on node \"crc\" DevicePath \"\"" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.785985 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-9jmg5"] Sep 29 13:59:59 crc kubenswrapper[4869]: E0929 13:59:59.786356 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131d94d1-1122-44ab-99b9-ca4b0801beac" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.786373 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="131d94d1-1122-44ab-99b9-ca4b0801beac" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: E0929 13:59:59.786400 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e8902bc-102d-40ac-b875-891302ebd0fb" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.786410 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e8902bc-102d-40ac-b875-891302ebd0fb" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.786565 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="131d94d1-1122-44ab-99b9-ca4b0801beac" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.786595 4869 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8e8902bc-102d-40ac-b875-891302ebd0fb" containerName="mariadb-database-create" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.787214 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9jmg5" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.794395 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9jmg5"] Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.873500 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkg7j\" (UniqueName: \"kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j\") pod \"keystone-db-create-9jmg5\" (UID: \"89f8ac17-8c0d-475c-85a1-93847e5b0d8a\") " pod="openstack/keystone-db-create-9jmg5" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.975261 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkg7j\" (UniqueName: \"kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j\") pod \"keystone-db-create-9jmg5\" (UID: \"89f8ac17-8c0d-475c-85a1-93847e5b0d8a\") " pod="openstack/keystone-db-create-9jmg5" Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.998416 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-md6jn"] Sep 29 13:59:59 crc kubenswrapper[4869]: I0929 13:59:59.999858 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-md6jn" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.005437 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkg7j\" (UniqueName: \"kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j\") pod \"keystone-db-create-9jmg5\" (UID: \"89f8ac17-8c0d-475c-85a1-93847e5b0d8a\") " pod="openstack/keystone-db-create-9jmg5" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.010548 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-md6jn"] Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.076576 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn5dr\" (UniqueName: \"kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr\") pod \"placement-db-create-md6jn\" (UID: \"497f91e5-218a-4f87-b810-0fc7d7a98a71\") " pod="openstack/placement-db-create-md6jn" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.101110 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9jmg5" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.155830 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c"] Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.157442 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.167684 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.168498 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.176078 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c"] Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.177967 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn5dr\" (UniqueName: \"kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr\") pod \"placement-db-create-md6jn\" (UID: \"497f91e5-218a-4f87-b810-0fc7d7a98a71\") " pod="openstack/placement-db-create-md6jn" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.203365 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn5dr\" (UniqueName: \"kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr\") pod \"placement-db-create-md6jn\" (UID: \"497f91e5-218a-4f87-b810-0fc7d7a98a71\") " pod="openstack/placement-db-create-md6jn" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.211979 4869 generic.go:334] "Generic (PLEG): container finished" podID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerID="17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5" exitCode=0 Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.212055 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerDied","Data":"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5"} Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.223998 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-zmpk5" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.223998 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zmpk5" event={"ID":"8e8902bc-102d-40ac-b875-891302ebd0fb","Type":"ContainerDied","Data":"2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481"} Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.224162 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ec60cc8885b5beec3b8f9f2cea8716da5b0d563a656b9b70ef6024f2a6f1481" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.238219 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerStarted","Data":"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2"} Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.239438 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.274891 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"0b77ed89-e796-4138-ae2c-fcd5f2125233","Type":"ContainerStarted","Data":"8fc1a31e9fe26c5e1a97c3f619e4016f25893a3692f9ddbee5d7cc2c17c09ea8"} Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.275181 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.279425 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8qtx\" (UniqueName: \"kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.279534 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.279560 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.318003 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-md6jn" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.359112 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=43.227684803 podStartE2EDuration="56.359086026s" podCreationTimestamp="2025-09-29 13:59:04 +0000 UTC" firstStartedPulling="2025-09-29 13:59:14.758710999 +0000 UTC m=+1081.199355319" lastFinishedPulling="2025-09-29 13:59:27.890112222 +0000 UTC m=+1094.330756542" observedRunningTime="2025-09-29 14:00:00.322684739 +0000 UTC m=+1126.763329079" watchObservedRunningTime="2025-09-29 14:00:00.359086026 +0000 UTC m=+1126.799730346" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.360734 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=55.360727228 podStartE2EDuration="55.360727228s" podCreationTimestamp="2025-09-29 13:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:00.359050735 +0000 UTC m=+1126.799695075" watchObservedRunningTime="2025-09-29 14:00:00.360727228 +0000 UTC m=+1126.801371548" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.381068 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8qtx\" (UniqueName: \"kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.381199 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.381220 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.386555 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.399981 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.402365 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8qtx\" (UniqueName: 
\"kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx\") pod \"collect-profiles-29319240-nq45c\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.480461 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.728754 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9jmg5"] Sep 29 14:00:00 crc kubenswrapper[4869]: W0929 14:00:00.732713 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89f8ac17_8c0d_475c_85a1_93847e5b0d8a.slice/crio-1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4 WatchSource:0}: Error finding container 1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4: Status 404 returned error can't find the container with id 1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4 Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.802616 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c"] Sep 29 14:00:00 crc kubenswrapper[4869]: I0929 14:00:00.907856 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-md6jn"] Sep 29 14:00:00 crc kubenswrapper[4869]: W0929 14:00:00.917824 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod497f91e5_218a_4f87_b810_0fc7d7a98a71.slice/crio-b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5 WatchSource:0}: Error finding container b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5: Status 404 returned error can't find the container with id b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5 Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.256895 4869 generic.go:334] "Generic (PLEG): container finished" podID="497f91e5-218a-4f87-b810-0fc7d7a98a71" containerID="94bc501c9d814f7e40f5ac59bc88ab0f4271ac3044458024f7b1f159f0b7e03e" exitCode=0 Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.256958 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-md6jn" event={"ID":"497f91e5-218a-4f87-b810-0fc7d7a98a71","Type":"ContainerDied","Data":"94bc501c9d814f7e40f5ac59bc88ab0f4271ac3044458024f7b1f159f0b7e03e"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.257303 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-md6jn" event={"ID":"497f91e5-218a-4f87-b810-0fc7d7a98a71","Type":"ContainerStarted","Data":"b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.261961 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerStarted","Data":"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.262158 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.263827 4869 generic.go:334] "Generic (PLEG): container finished" 
podID="89f8ac17-8c0d-475c-85a1-93847e5b0d8a" containerID="7e6e4854ba5509011e9b70e9f6aed6282c1107094be529bbd49099bbbfb6ee1f" exitCode=0 Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.263946 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9jmg5" event={"ID":"89f8ac17-8c0d-475c-85a1-93847e5b0d8a","Type":"ContainerDied","Data":"7e6e4854ba5509011e9b70e9f6aed6282c1107094be529bbd49099bbbfb6ee1f"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.263979 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9jmg5" event={"ID":"89f8ac17-8c0d-475c-85a1-93847e5b0d8a","Type":"ContainerStarted","Data":"1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.265433 4869 generic.go:334] "Generic (PLEG): container finished" podID="d4467c3d-879c-4833-bfbd-7b6308962682" containerID="380534b77da008affdafba5e0bfe7f51993ac6c02afdce9a2e13d8faa7fd4902" exitCode=0 Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.265493 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" event={"ID":"d4467c3d-879c-4833-bfbd-7b6308962682","Type":"ContainerDied","Data":"380534b77da008affdafba5e0bfe7f51993ac6c02afdce9a2e13d8faa7fd4902"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.265515 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" event={"ID":"d4467c3d-879c-4833-bfbd-7b6308962682","Type":"ContainerStarted","Data":"e827d4ff252e975d58f26faf5d4cb9b6cc852779e88cec329a37176e1e77f874"} Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.320833 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=57.320811756 podStartE2EDuration="57.320811756s" podCreationTimestamp="2025-09-29 13:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:01.319630205 +0000 UTC m=+1127.760274525" watchObservedRunningTime="2025-09-29 14:00:01.320811756 +0000 UTC m=+1127.761456086" Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.805220 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-ab6b-account-create-m26p6"] Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.806813 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.808913 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.811425 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-ab6b-account-create-m26p6"] Sep 29 14:00:01 crc kubenswrapper[4869]: I0929 14:00:01.910059 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gw5l\" (UniqueName: \"kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l\") pod \"watcher-ab6b-account-create-m26p6\" (UID: \"368fafef-d0b7-42b8-b2fa-73310a8111fc\") " pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.011786 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gw5l\" (UniqueName: \"kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l\") pod \"watcher-ab6b-account-create-m26p6\" (UID: \"368fafef-d0b7-42b8-b2fa-73310a8111fc\") " pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.038446 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gw5l\" (UniqueName: \"kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l\") pod \"watcher-ab6b-account-create-m26p6\" (UID: \"368fafef-d0b7-42b8-b2fa-73310a8111fc\") " pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.124770 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.643140 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-ab6b-account-create-m26p6"] Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.808793 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.814692 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9jmg5" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.821138 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-md6jn" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.929559 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume\") pod \"d4467c3d-879c-4833-bfbd-7b6308962682\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.929674 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn5dr\" (UniqueName: \"kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr\") pod \"497f91e5-218a-4f87-b810-0fc7d7a98a71\" (UID: \"497f91e5-218a-4f87-b810-0fc7d7a98a71\") " Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.929699 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkg7j\" (UniqueName: \"kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j\") pod \"89f8ac17-8c0d-475c-85a1-93847e5b0d8a\" (UID: \"89f8ac17-8c0d-475c-85a1-93847e5b0d8a\") " Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.929742 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8qtx\" (UniqueName: \"kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx\") pod \"d4467c3d-879c-4833-bfbd-7b6308962682\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.929885 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume\") pod \"d4467c3d-879c-4833-bfbd-7b6308962682\" (UID: \"d4467c3d-879c-4833-bfbd-7b6308962682\") " Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.930779 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume" (OuterVolumeSpecName: "config-volume") pod "d4467c3d-879c-4833-bfbd-7b6308962682" (UID: "d4467c3d-879c-4833-bfbd-7b6308962682"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.935774 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx" (OuterVolumeSpecName: "kube-api-access-d8qtx") pod "d4467c3d-879c-4833-bfbd-7b6308962682" (UID: "d4467c3d-879c-4833-bfbd-7b6308962682"). InnerVolumeSpecName "kube-api-access-d8qtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.936253 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr" (OuterVolumeSpecName: "kube-api-access-sn5dr") pod "497f91e5-218a-4f87-b810-0fc7d7a98a71" (UID: "497f91e5-218a-4f87-b810-0fc7d7a98a71"). InnerVolumeSpecName "kube-api-access-sn5dr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.937205 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j" (OuterVolumeSpecName: "kube-api-access-pkg7j") pod "89f8ac17-8c0d-475c-85a1-93847e5b0d8a" (UID: "89f8ac17-8c0d-475c-85a1-93847e5b0d8a"). InnerVolumeSpecName "kube-api-access-pkg7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:02 crc kubenswrapper[4869]: I0929 14:00:02.937240 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d4467c3d-879c-4833-bfbd-7b6308962682" (UID: "d4467c3d-879c-4833-bfbd-7b6308962682"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.031799 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4467c3d-879c-4833-bfbd-7b6308962682-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.031845 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4467c3d-879c-4833-bfbd-7b6308962682-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.031858 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn5dr\" (UniqueName: \"kubernetes.io/projected/497f91e5-218a-4f87-b810-0fc7d7a98a71-kube-api-access-sn5dr\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.031872 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkg7j\" (UniqueName: \"kubernetes.io/projected/89f8ac17-8c0d-475c-85a1-93847e5b0d8a-kube-api-access-pkg7j\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.031883 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8qtx\" (UniqueName: \"kubernetes.io/projected/d4467c3d-879c-4833-bfbd-7b6308962682-kube-api-access-d8qtx\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.285268 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9jmg5" event={"ID":"89f8ac17-8c0d-475c-85a1-93847e5b0d8a","Type":"ContainerDied","Data":"1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4"} Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.285311 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-9jmg5" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.285322 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ff0e5d6e34220747a39ffff9f676e6a5e784ac241d21335453ce2e6791cbcd4" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.287202 4869 generic.go:334] "Generic (PLEG): container finished" podID="368fafef-d0b7-42b8-b2fa-73310a8111fc" containerID="7caa789610a9eb785e8d8f80106a813130a414966c9fc65155c8e879dbd0e3e3" exitCode=0 Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.287374 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-ab6b-account-create-m26p6" event={"ID":"368fafef-d0b7-42b8-b2fa-73310a8111fc","Type":"ContainerDied","Data":"7caa789610a9eb785e8d8f80106a813130a414966c9fc65155c8e879dbd0e3e3"} Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.287565 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-ab6b-account-create-m26p6" event={"ID":"368fafef-d0b7-42b8-b2fa-73310a8111fc","Type":"ContainerStarted","Data":"69bfbd4e60c9de4d9d9d4a37bb9794acd346f71322db6fb19b94cfc2f994207a"} Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.289118 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" event={"ID":"d4467c3d-879c-4833-bfbd-7b6308962682","Type":"ContainerDied","Data":"e827d4ff252e975d58f26faf5d4cb9b6cc852779e88cec329a37176e1e77f874"} Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.289163 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e827d4ff252e975d58f26faf5d4cb9b6cc852779e88cec329a37176e1e77f874" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.289615 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.291244 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-md6jn" event={"ID":"497f91e5-218a-4f87-b810-0fc7d7a98a71","Type":"ContainerDied","Data":"b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5"} Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.291316 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-md6jn" Sep 29 14:00:03 crc kubenswrapper[4869]: I0929 14:00:03.291332 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b25528d88b8705fbe6686e8dfb5368ae99939d3e197d72718368e6bfc9457bf5" Sep 29 14:00:04 crc kubenswrapper[4869]: I0929 14:00:04.611853 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:04 crc kubenswrapper[4869]: I0929 14:00:04.762527 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gw5l\" (UniqueName: \"kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l\") pod \"368fafef-d0b7-42b8-b2fa-73310a8111fc\" (UID: \"368fafef-d0b7-42b8-b2fa-73310a8111fc\") " Sep 29 14:00:04 crc kubenswrapper[4869]: I0929 14:00:04.767639 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l" (OuterVolumeSpecName: "kube-api-access-9gw5l") pod "368fafef-d0b7-42b8-b2fa-73310a8111fc" (UID: "368fafef-d0b7-42b8-b2fa-73310a8111fc"). InnerVolumeSpecName "kube-api-access-9gw5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:04 crc kubenswrapper[4869]: I0929 14:00:04.864136 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gw5l\" (UniqueName: \"kubernetes.io/projected/368fafef-d0b7-42b8-b2fa-73310a8111fc-kube-api-access-9gw5l\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.309987 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-ab6b-account-create-m26p6" event={"ID":"368fafef-d0b7-42b8-b2fa-73310a8111fc","Type":"ContainerDied","Data":"69bfbd4e60c9de4d9d9d4a37bb9794acd346f71322db6fb19b94cfc2f994207a"} Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.310295 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69bfbd4e60c9de4d9d9d4a37bb9794acd346f71322db6fb19b94cfc2f994207a" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.310064 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-ab6b-account-create-m26p6" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.430804 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-a035-account-create-kmx6v"] Sep 29 14:00:05 crc kubenswrapper[4869]: E0929 14:00:05.431158 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f8ac17-8c0d-475c-85a1-93847e5b0d8a" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.431178 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f8ac17-8c0d-475c-85a1-93847e5b0d8a" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: E0929 14:00:05.431194 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4467c3d-879c-4833-bfbd-7b6308962682" containerName="collect-profiles" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.431200 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4467c3d-879c-4833-bfbd-7b6308962682" containerName="collect-profiles" Sep 29 14:00:05 crc kubenswrapper[4869]: E0929 14:00:05.431216 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="497f91e5-218a-4f87-b810-0fc7d7a98a71" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.431222 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="497f91e5-218a-4f87-b810-0fc7d7a98a71" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: E0929 14:00:05.431233 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="368fafef-d0b7-42b8-b2fa-73310a8111fc" containerName="mariadb-account-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.431239 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="368fafef-d0b7-42b8-b2fa-73310a8111fc" containerName="mariadb-account-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.436922 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="497f91e5-218a-4f87-b810-0fc7d7a98a71" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.437013 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4467c3d-879c-4833-bfbd-7b6308962682" containerName="collect-profiles" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.437048 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="368fafef-d0b7-42b8-b2fa-73310a8111fc" containerName="mariadb-account-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.437074 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f8ac17-8c0d-475c-85a1-93847e5b0d8a" containerName="mariadb-database-create" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.438371 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.453540 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.472989 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a035-account-create-kmx6v"] Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.576674 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnwm4\" (UniqueName: \"kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4\") pod \"glance-a035-account-create-kmx6v\" (UID: \"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e\") " pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.678230 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnwm4\" (UniqueName: \"kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4\") pod \"glance-a035-account-create-kmx6v\" (UID: \"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e\") " pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.697982 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnwm4\" (UniqueName: \"kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4\") pod \"glance-a035-account-create-kmx6v\" (UID: \"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e\") " pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:05 crc kubenswrapper[4869]: I0929 14:00:05.769578 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:06 crc kubenswrapper[4869]: I0929 14:00:06.217553 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a035-account-create-kmx6v"] Sep 29 14:00:06 crc kubenswrapper[4869]: I0929 14:00:06.319561 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a035-account-create-kmx6v" event={"ID":"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e","Type":"ContainerStarted","Data":"47ef4de5a820f87dc9ca328bff83ae38e6c62076fd53bdfc62a80d7434d0dba3"} Sep 29 14:00:07 crc kubenswrapper[4869]: I0929 14:00:07.329196 4869 generic.go:334] "Generic (PLEG): container finished" podID="6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" containerID="4e773771b262fcd97e352a7b8fcf727f2b9279a0f1e887482ca2da633956d9d9" exitCode=0 Sep 29 14:00:07 crc kubenswrapper[4869]: I0929 14:00:07.329299 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a035-account-create-kmx6v" event={"ID":"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e","Type":"ContainerDied","Data":"4e773771b262fcd97e352a7b8fcf727f2b9279a0f1e887482ca2da633956d9d9"} Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.511423 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.646608 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnwm4\" (UniqueName: \"kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4\") pod \"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e\" (UID: \"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e\") " Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.661080 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4" (OuterVolumeSpecName: "kube-api-access-tnwm4") pod "6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" (UID: "6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e"). InnerVolumeSpecName "kube-api-access-tnwm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.750935 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnwm4\" (UniqueName: \"kubernetes.io/projected/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e-kube-api-access-tnwm4\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.937162 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f174-account-create-2x62k"] Sep 29 14:00:09 crc kubenswrapper[4869]: E0929 14:00:09.937549 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" containerName="mariadb-account-create" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.937567 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" containerName="mariadb-account-create" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.937859 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" containerName="mariadb-account-create" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.938740 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.942434 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Sep 29 14:00:09 crc kubenswrapper[4869]: I0929 14:00:09.947872 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f174-account-create-2x62k"] Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.055045 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwj4x\" (UniqueName: \"kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x\") pod \"keystone-f174-account-create-2x62k\" (UID: \"9e851177-0022-4915-ace1-6586541e545b\") " pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.080238 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jhwdk" podUID="6dbad8f0-0816-40ae-b0b4-d6602f352641" containerName="ovn-controller" probeResult="failure" output=< Sep 29 14:00:10 crc kubenswrapper[4869]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Sep 29 14:00:10 crc kubenswrapper[4869]: > Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.134891 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-3528-account-create-kg89s"] Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.135934 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.138667 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.143149 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-3528-account-create-kg89s"] Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.161345 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwj4x\" (UniqueName: \"kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x\") pod \"keystone-f174-account-create-2x62k\" (UID: \"9e851177-0022-4915-ace1-6586541e545b\") " pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.179297 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwj4x\" (UniqueName: \"kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x\") pod \"keystone-f174-account-create-2x62k\" (UID: \"9e851177-0022-4915-ace1-6586541e545b\") " pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.254594 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.263161 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4h7b\" (UniqueName: \"kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b\") pod \"placement-3528-account-create-kg89s\" (UID: \"9f96c0f0-dea8-42df-aff1-0470cbeb03d2\") " pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.359984 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a035-account-create-kmx6v" event={"ID":"6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e","Type":"ContainerDied","Data":"47ef4de5a820f87dc9ca328bff83ae38e6c62076fd53bdfc62a80d7434d0dba3"} Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.360282 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47ef4de5a820f87dc9ca328bff83ae38e6c62076fd53bdfc62a80d7434d0dba3" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.360037 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a035-account-create-kmx6v" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.365856 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4h7b\" (UniqueName: \"kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b\") pod \"placement-3528-account-create-kg89s\" (UID: \"9f96c0f0-dea8-42df-aff1-0470cbeb03d2\") " pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.386690 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4h7b\" (UniqueName: \"kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b\") pod \"placement-3528-account-create-kg89s\" (UID: \"9f96c0f0-dea8-42df-aff1-0470cbeb03d2\") " pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.462691 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.710013 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f174-account-create-2x62k"] Sep 29 14:00:10 crc kubenswrapper[4869]: W0929 14:00:10.721306 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e851177_0022_4915_ace1_6586541e545b.slice/crio-747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946 WatchSource:0}: Error finding container 747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946: Status 404 returned error can't find the container with id 747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946 Sep 29 14:00:10 crc kubenswrapper[4869]: I0929 14:00:10.904469 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-3528-account-create-kg89s"] Sep 29 14:00:10 crc kubenswrapper[4869]: W0929 14:00:10.907979 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f96c0f0_dea8_42df_aff1_0470cbeb03d2.slice/crio-a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2 WatchSource:0}: Error finding container a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2: Status 404 returned error can't find the container with id a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2 Sep 29 14:00:11 crc kubenswrapper[4869]: I0929 14:00:11.370179 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3528-account-create-kg89s" event={"ID":"9f96c0f0-dea8-42df-aff1-0470cbeb03d2","Type":"ContainerStarted","Data":"a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2"} Sep 29 14:00:11 crc kubenswrapper[4869]: I0929 14:00:11.371657 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f174-account-create-2x62k" event={"ID":"9e851177-0022-4915-ace1-6586541e545b","Type":"ContainerStarted","Data":"747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946"} Sep 29 14:00:13 crc kubenswrapper[4869]: I0929 14:00:13.395434 4869 generic.go:334] "Generic (PLEG): container finished" podID="9f96c0f0-dea8-42df-aff1-0470cbeb03d2" containerID="e52c5904102c730bd64678196c012a0ccc3efe81ebd8f97a0c229d6ed0ea9ef2" exitCode=0 Sep 29 14:00:13 crc kubenswrapper[4869]: I0929 14:00:13.395534 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3528-account-create-kg89s" event={"ID":"9f96c0f0-dea8-42df-aff1-0470cbeb03d2","Type":"ContainerDied","Data":"e52c5904102c730bd64678196c012a0ccc3efe81ebd8f97a0c229d6ed0ea9ef2"} Sep 29 14:00:13 crc kubenswrapper[4869]: I0929 14:00:13.398511 4869 generic.go:334] "Generic (PLEG): container finished" podID="9e851177-0022-4915-ace1-6586541e545b" containerID="417d186a4bd3d5a12a53100d4948fb7373b2fe46717fa43e2d962b3a13b46d83" exitCode=0 Sep 29 14:00:13 crc kubenswrapper[4869]: I0929 14:00:13.398578 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f174-account-create-2x62k" event={"ID":"9e851177-0022-4915-ace1-6586541e545b","Type":"ContainerDied","Data":"417d186a4bd3d5a12a53100d4948fb7373b2fe46717fa43e2d962b3a13b46d83"} Sep 29 14:00:14 crc kubenswrapper[4869]: I0929 14:00:14.904486 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:14 crc kubenswrapper[4869]: I0929 14:00:14.911291 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.047144 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4h7b\" (UniqueName: \"kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b\") pod \"9f96c0f0-dea8-42df-aff1-0470cbeb03d2\" (UID: \"9f96c0f0-dea8-42df-aff1-0470cbeb03d2\") " Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.047379 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwj4x\" (UniqueName: \"kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x\") pod \"9e851177-0022-4915-ace1-6586541e545b\" (UID: \"9e851177-0022-4915-ace1-6586541e545b\") " Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.053194 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x" (OuterVolumeSpecName: "kube-api-access-mwj4x") pod "9e851177-0022-4915-ace1-6586541e545b" (UID: "9e851177-0022-4915-ace1-6586541e545b"). InnerVolumeSpecName "kube-api-access-mwj4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.065305 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b" (OuterVolumeSpecName: "kube-api-access-f4h7b") pod "9f96c0f0-dea8-42df-aff1-0470cbeb03d2" (UID: "9f96c0f0-dea8-42df-aff1-0470cbeb03d2"). InnerVolumeSpecName "kube-api-access-f4h7b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.086742 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jhwdk" podUID="6dbad8f0-0816-40ae-b0b4-d6602f352641" containerName="ovn-controller" probeResult="failure" output=< Sep 29 14:00:15 crc kubenswrapper[4869]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Sep 29 14:00:15 crc kubenswrapper[4869]: > Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.149860 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwj4x\" (UniqueName: \"kubernetes.io/projected/9e851177-0022-4915-ace1-6586541e545b-kube-api-access-mwj4x\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.149892 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4h7b\" (UniqueName: \"kubernetes.io/projected/9f96c0f0-dea8-42df-aff1-0470cbeb03d2-kube-api-access-f4h7b\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.232774 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.241847 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4w2zq" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.421803 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f174-account-create-2x62k" event={"ID":"9e851177-0022-4915-ace1-6586541e545b","Type":"ContainerDied","Data":"747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946"} Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.421831 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f174-account-create-2x62k" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.421856 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="747792c772af7dbd9a388ca0d5256ee545fb0130ce4f9fabbac600b60ddbf946" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.423898 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3528-account-create-kg89s" event={"ID":"9f96c0f0-dea8-42df-aff1-0470cbeb03d2","Type":"ContainerDied","Data":"a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2"} Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.423964 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5fbdc9ea298ada8a953e17632488027e8fb6c7f3414b3bb6c01157fe2011cd2" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.423932 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-3528-account-create-kg89s" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.497884 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jhwdk-config-gntdv"] Sep 29 14:00:15 crc kubenswrapper[4869]: E0929 14:00:15.498711 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e851177-0022-4915-ace1-6586541e545b" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.498730 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e851177-0022-4915-ace1-6586541e545b" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: E0929 14:00:15.498753 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f96c0f0-dea8-42df-aff1-0470cbeb03d2" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.498760 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f96c0f0-dea8-42df-aff1-0470cbeb03d2" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.498960 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e851177-0022-4915-ace1-6586541e545b" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.498995 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f96c0f0-dea8-42df-aff1-0470cbeb03d2" containerName="mariadb-account-create" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.499652 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.503352 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.509086 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk-config-gntdv"] Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.608425 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-npjf6"] Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.609650 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.613837 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-87hr5" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.614077 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.627894 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-npjf6"] Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657752 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657793 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657824 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657906 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phct9\" (UniqueName: \"kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657960 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.657990 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.759745 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.759793 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.759830 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.759856 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760066 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760138 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760100 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760092 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760336 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phct9\" (UniqueName: \"kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760481 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5rpn\" (UniqueName: \"kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760540 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760661 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.760850 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.761429 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.762576 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.779902 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phct9\" (UniqueName: \"kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9\") pod \"ovn-controller-jhwdk-config-gntdv\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.828275 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.863028 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.863086 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.863224 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5rpn\" (UniqueName: \"kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.863326 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.866096 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.867077 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.867224 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.867251 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.887982 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5rpn\" (UniqueName: \"kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn\") pod \"glance-db-sync-npjf6\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:15 crc kubenswrapper[4869]: I0929 14:00:15.931403 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:16 crc kubenswrapper[4869]: I0929 14:00:16.196587 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.108:5671: connect: connection refused" Sep 29 14:00:16 crc kubenswrapper[4869]: I0929 14:00:16.532765 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="0b77ed89-e796-4138-ae2c-fcd5f2125233" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.109:5671: connect: connection refused" Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.107205 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-npjf6"] Sep 29 14:00:17 crc kubenswrapper[4869]: W0929 14:00:17.112489 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62ecf715_d247_4d7d_baa0_2c929a73a141.slice/crio-8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579 WatchSource:0}: Error finding container 8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579: Status 404 returned error can't find the container with id 8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579 Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.122728 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk-config-gntdv"] Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.443190 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-npjf6" event={"ID":"62ecf715-d247-4d7d-baa0-2c929a73a141","Type":"ContainerStarted","Data":"8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579"} Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.446950 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerStarted","Data":"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623"} Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.450685 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-gntdv" event={"ID":"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3","Type":"ContainerStarted","Data":"3866e87d7e520e69bcfbb508b569d0869f300ffceece9ee96ff4b1626b119b83"} Sep 29 14:00:17 crc kubenswrapper[4869]: I0929 14:00:17.477726 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.36974233 podStartE2EDuration="1m6.477710892s" podCreationTimestamp="2025-09-29 13:59:11 +0000 UTC" firstStartedPulling="2025-09-29 13:59:26.461878131 +0000 UTC m=+1092.902522451" lastFinishedPulling="2025-09-29 14:00:16.569846693 +0000 UTC m=+1143.010491013" observedRunningTime="2025-09-29 14:00:17.476206033 +0000 UTC m=+1143.916850353" watchObservedRunningTime="2025-09-29 14:00:17.477710892 +0000 UTC m=+1143.918355212" Sep 29 14:00:18 crc kubenswrapper[4869]: I0929 14:00:18.048252 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:18 crc kubenswrapper[4869]: I0929 14:00:18.460421 4869 generic.go:334] "Generic (PLEG): container finished" podID="70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" containerID="e8b18c233c2243fde14ba079808129a4b7275a61f4fbbf1183b51dde01dad42e" 
exitCode=0 Sep 29 14:00:18 crc kubenswrapper[4869]: I0929 14:00:18.460564 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-gntdv" event={"ID":"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3","Type":"ContainerDied","Data":"e8b18c233c2243fde14ba079808129a4b7275a61f4fbbf1183b51dde01dad42e"} Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.755784 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.838408 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phct9\" (UniqueName: \"kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.839522 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.839669 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.839765 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run" (OuterVolumeSpecName: "var-run") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.839905 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840043 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840141 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn\") pod \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\" (UID: \"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3\") " Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840268 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840319 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840837 4869 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-log-ovn\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840916 4869 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.840973 4869 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-var-run-ovn\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.841007 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.841186 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts" (OuterVolumeSpecName: "scripts") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.843602 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9" (OuterVolumeSpecName: "kube-api-access-phct9") pod "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" (UID: "70af9b66-a70b-4cbf-b88b-2b5b7797d6a3"). InnerVolumeSpecName "kube-api-access-phct9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.942795 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.942920 4869 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-additional-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:19 crc kubenswrapper[4869]: I0929 14:00:19.942934 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phct9\" (UniqueName: \"kubernetes.io/projected/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3-kube-api-access-phct9\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.096412 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-jhwdk" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.477272 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-gntdv" event={"ID":"70af9b66-a70b-4cbf-b88b-2b5b7797d6a3","Type":"ContainerDied","Data":"3866e87d7e520e69bcfbb508b569d0869f300ffceece9ee96ff4b1626b119b83"} Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.477313 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3866e87d7e520e69bcfbb508b569d0869f300ffceece9ee96ff4b1626b119b83" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.477325 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-gntdv" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.656928 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.656991 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.657037 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.657593 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.657658 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60" gracePeriod=600 
Sep 29 14:00:20 crc kubenswrapper[4869]: E0929 14:00:20.789450 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2cb4b77_d447_4866_ac1e_eb4f0b4babae.slice/crio-conmon-67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60.scope\": RecentStats: unable to find data in memory cache]"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.873299 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jhwdk-config-gntdv"]
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.881637 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jhwdk-config-gntdv"]
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.928205 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jhwdk-config-9f98l"]
Sep 29 14:00:20 crc kubenswrapper[4869]: E0929 14:00:20.928943 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" containerName="ovn-config"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.929038 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" containerName="ovn-config"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.929297 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" containerName="ovn-config"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.930213 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.942450 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Sep 29 14:00:20 crc kubenswrapper[4869]: I0929 14:00:20.953314 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk-config-9f98l"]
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082085 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082188 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082323 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082354 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082425 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npf65\" (UniqueName: \"kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.082448 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184644 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184715 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184764 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184795 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184837 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npf65\" (UniqueName: \"kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.184862 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.185078 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.185066 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.185081 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.185689 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.186742 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.214376 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npf65\" (UniqueName: \"kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65\") pod \"ovn-controller-jhwdk-config-9f98l\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.246383 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-9f98l"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.497741 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60" exitCode=0
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.497810 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60"}
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.498113 4869 scope.go:117] "RemoveContainer" containerID="759af6dee641bff740e90c1320e6cbfb6e8ed5030cb0042b5634388f31d6067a"
Sep 29 14:00:21 crc kubenswrapper[4869]: I0929 14:00:21.725009 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jhwdk-config-9f98l"]
Sep 29 14:00:21 crc kubenswrapper[4869]: W0929 14:00:21.730455 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc342e61d_0f2e_4e2c_98a9_c58397596a90.slice/crio-b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec WatchSource:0}: Error finding container b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec: Status 404 returned error can't find the container with id b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec
Sep 29 14:00:22 crc kubenswrapper[4869]: I0929 14:00:22.255309 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70af9b66-a70b-4cbf-b88b-2b5b7797d6a3" path="/var/lib/kubelet/pods/70af9b66-a70b-4cbf-b88b-2b5b7797d6a3/volumes"
Sep 29 14:00:22 crc kubenswrapper[4869]: I0929 14:00:22.508425 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413"}
Sep 29 14:00:22 crc kubenswrapper[4869]: I0929 14:00:22.510067 4869 generic.go:334] "Generic (PLEG): container finished" podID="c342e61d-0f2e-4e2c-98a9-c58397596a90" containerID="7e72ca549713c7e7b27dbae5b9ee4dbacfdd9679c375b30ef619e62893dc6cf6" exitCode=0
Sep 29 14:00:22 crc kubenswrapper[4869]: I0929 14:00:22.510095 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-9f98l" event={"ID":"c342e61d-0f2e-4e2c-98a9-c58397596a90","Type":"ContainerDied","Data":"7e72ca549713c7e7b27dbae5b9ee4dbacfdd9679c375b30ef619e62893dc6cf6"}
Sep 29 14:00:22 crc kubenswrapper[4869]: I0929 14:00:22.510109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-9f98l" event={"ID":"c342e61d-0f2e-4e2c-98a9-c58397596a90","Type":"ContainerStarted","Data":"b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec"}
Sep 29 14:00:25 crc kubenswrapper[4869]: I0929 14:00:25.865919 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Sep 29 14:00:26 crc kubenswrapper[4869]: I0929 14:00:26.209184 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Sep 29 14:00:26 crc kubenswrapper[4869]: I0929 14:00:26.527883 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0"
pod="openstack/rabbitmq-notifications-server-0" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.374012 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-f2wnx"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.376077 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.390175 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f2wnx"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.499730 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s46pv\" (UniqueName: \"kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv\") pod \"barbican-db-create-f2wnx\" (UID: \"c9ee6e01-c73e-499a-b626-ac73939d9af3\") " pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.500359 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-52dht"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.501560 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-52dht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.514328 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-52dht"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.601493 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndsd7\" (UniqueName: \"kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7\") pod \"cinder-db-create-52dht\" (UID: \"df9ac455-60af-4f59-9eb1-8dcb8569a21c\") " pod="openstack/cinder-db-create-52dht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.601573 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s46pv\" (UniqueName: \"kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv\") pod \"barbican-db-create-f2wnx\" (UID: \"c9ee6e01-c73e-499a-b626-ac73939d9af3\") " pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.630929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s46pv\" (UniqueName: \"kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv\") pod \"barbican-db-create-f2wnx\" (UID: \"c9ee6e01-c73e-499a-b626-ac73939d9af3\") " pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.679068 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-gn5ht"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.680335 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.690049 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.692083 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gn5ht"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.704580 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndsd7\" (UniqueName: \"kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7\") pod \"cinder-db-create-52dht\" (UID: \"df9ac455-60af-4f59-9eb1-8dcb8569a21c\") " pod="openstack/cinder-db-create-52dht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.704680 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkmhp\" (UniqueName: \"kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp\") pod \"neutron-db-create-gn5ht\" (UID: \"de807e17-2ffa-4679-9156-1905ebf6c3e6\") " pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.729343 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndsd7\" (UniqueName: \"kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7\") pod \"cinder-db-create-52dht\" (UID: \"df9ac455-60af-4f59-9eb1-8dcb8569a21c\") " pod="openstack/cinder-db-create-52dht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.806241 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkmhp\" (UniqueName: \"kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp\") pod \"neutron-db-create-gn5ht\" (UID: \"de807e17-2ffa-4679-9156-1905ebf6c3e6\") " pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.823781 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-52dht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.829232 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkmhp\" (UniqueName: \"kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp\") pod \"neutron-db-create-gn5ht\" (UID: \"de807e17-2ffa-4679-9156-1905ebf6c3e6\") " pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.841341 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-hdwlg"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.842470 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.845669 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.845858 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.845991 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-77f6b" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.846114 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.891142 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-hdwlg"] Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.908443 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.908691 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl95v\" (UniqueName: \"kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.908772 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:27 crc kubenswrapper[4869]: I0929 14:00:27.997558 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.011001 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl95v\" (UniqueName: \"kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.011065 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.011123 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.018714 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.029595 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.041740 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl95v\" (UniqueName: \"kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v\") pod \"keystone-db-sync-hdwlg\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.048392 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.051041 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.199182 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.587985 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.945371 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-qghzx"] Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.946516 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.948753 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-4smxq" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.948989 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Sep 29 14:00:28 crc kubenswrapper[4869]: I0929 14:00:28.955677 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qghzx"] Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.135362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.135474 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl7bx\" (UniqueName: \"kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.135529 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.136147 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.238476 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl7bx\" (UniqueName: \"kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.238887 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.238926 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.239020 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle\") 
pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.251820 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.251928 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.252004 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.259841 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl7bx\" (UniqueName: \"kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx\") pod \"watcher-db-sync-qghzx\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:29 crc kubenswrapper[4869]: I0929 14:00:29.265978 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.185148 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-9f98l" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.361533 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.361976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.362014 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.362043 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.362111 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.362145 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npf65\" (UniqueName: \"kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65\") pod \"c342e61d-0f2e-4e2c-98a9-c58397596a90\" (UID: \"c342e61d-0f2e-4e2c-98a9-c58397596a90\") " Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.361674 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.363038 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.363100 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run" (OuterVolumeSpecName: "var-run") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.364250 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.365163 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts" (OuterVolumeSpecName: "scripts") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:30 crc kubenswrapper[4869]: I0929 14:00:30.388761 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65" (OuterVolumeSpecName: "kube-api-access-npf65") pod "c342e61d-0f2e-4e2c-98a9-c58397596a90" (UID: "c342e61d-0f2e-4e2c-98a9-c58397596a90"). InnerVolumeSpecName "kube-api-access-npf65". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464927 4869 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run-ovn\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464956 4869 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-run\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464966 4869 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-additional-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464975 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c342e61d-0f2e-4e2c-98a9-c58397596a90-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464984 4869 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c342e61d-0f2e-4e2c-98a9-c58397596a90-var-log-ovn\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.464993 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npf65\" (UniqueName: \"kubernetes.io/projected/c342e61d-0f2e-4e2c-98a9-c58397596a90-kube-api-access-npf65\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.552676 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.615313 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="prometheus" containerID="cri-o://0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" gracePeriod=600 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.615454 4869 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jhwdk-config-9f98l" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.616242 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="config-reloader" containerID="cri-o://bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" gracePeriod=600 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.616536 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="thanos-sidecar" containerID="cri-o://0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" gracePeriod=600 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.616727 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jhwdk-config-9f98l" event={"ID":"c342e61d-0f2e-4e2c-98a9-c58397596a90","Type":"ContainerDied","Data":"b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.616763 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9a9ce6636eebe424a2b9e079afeccbc075c339a53db22a6045238084a294fec" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:30.618459 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f2wnx"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.192390 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qghzx"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.214191 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-52dht"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.276638 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jhwdk-config-9f98l"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.301802 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jhwdk-config-9f98l"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.385681 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-hdwlg"] Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.400686 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gn5ht"] Sep 29 14:00:31 crc kubenswrapper[4869]: W0929 14:00:31.431175 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddae249ed_58ac_41dc_b84c_59ba9f80d003.slice/crio-f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24 WatchSource:0}: Error finding container f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24: Status 404 returned error can't find the container with id f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.633179 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.652767 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qghzx" event={"ID":"bf1f0c5e-7cbc-4d6f-afc4-241de854da60","Type":"ContainerStarted","Data":"be5cf8363bf6316273034e92e897cd1f64c23977a2951eb0a4e8891ac30771e2"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.654705 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gn5ht" event={"ID":"de807e17-2ffa-4679-9156-1905ebf6c3e6","Type":"ContainerStarted","Data":"ca25ca703226e33cca5066d45c4c70d7c36336b9f317836bf216426006c6c0a1"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.657230 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-52dht" event={"ID":"df9ac455-60af-4f59-9eb1-8dcb8569a21c","Type":"ContainerStarted","Data":"204b34762bcc1c2859973455721efec89dcf694fc8f5ba4610d6eb234eb0eda0"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.657268 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-52dht" event={"ID":"df9ac455-60af-4f59-9eb1-8dcb8569a21c","Type":"ContainerStarted","Data":"e91cb439415e27ff5ee598131174ddd078231c02fbfb92d590c5ea1451267d89"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.658579 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hdwlg" event={"ID":"dae249ed-58ac-41dc-b84c-59ba9f80d003","Type":"ContainerStarted","Data":"f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.691268 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-52dht" podStartSLOduration=4.691240631 podStartE2EDuration="4.691240631s" podCreationTimestamp="2025-09-29 14:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:31.689594369 +0000 UTC m=+1158.130238689" watchObservedRunningTime="2025-09-29 14:00:31.691240631 +0000 UTC m=+1158.131884971" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.695735 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-npjf6" event={"ID":"62ecf715-d247-4d7d-baa0-2c929a73a141","Type":"ContainerStarted","Data":"b5ea25fc052f09c2b7433621953e6df568bb6a63e11f83d9ff6e0e5305c29467"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708407 4869 generic.go:334] "Generic (PLEG): container finished" podID="10a66559-191f-464a-8095-efb79cc5b29f" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" exitCode=0 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708440 4869 generic.go:334] "Generic (PLEG): container finished" podID="10a66559-191f-464a-8095-efb79cc5b29f" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" exitCode=0 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708448 4869 generic.go:334] "Generic (PLEG): container finished" podID="10a66559-191f-464a-8095-efb79cc5b29f" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" exitCode=0 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708486 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerDied","Data":"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708511 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerDied","Data":"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708522 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerDied","Data":"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708531 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"10a66559-191f-464a-8095-efb79cc5b29f","Type":"ContainerDied","Data":"7db0897924248cffdd4753c851683177b635cf350eefe6cb717ae32ed859393a"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708548 4869 scope.go:117] "RemoveContainer" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.708680 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.714600 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-npjf6" podStartSLOduration=3.651370354 podStartE2EDuration="16.714580468s" podCreationTimestamp="2025-09-29 14:00:15 +0000 UTC" firstStartedPulling="2025-09-29 14:00:17.115208225 +0000 UTC m=+1143.555852546" lastFinishedPulling="2025-09-29 14:00:30.17841834 +0000 UTC m=+1156.619062660" observedRunningTime="2025-09-29 14:00:31.710752329 +0000 UTC m=+1158.151396649" watchObservedRunningTime="2025-09-29 14:00:31.714580468 +0000 UTC m=+1158.155224788" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.717511 4869 generic.go:334] "Generic (PLEG): container finished" podID="c9ee6e01-c73e-499a-b626-ac73939d9af3" containerID="0321aafb5f9d3b8ecc8038ebbcd454d68bf605d1d7daed969e718c88ed4f1bc8" exitCode=0 Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.717571 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f2wnx" event={"ID":"c9ee6e01-c73e-499a-b626-ac73939d9af3","Type":"ContainerDied","Data":"0321aafb5f9d3b8ecc8038ebbcd454d68bf605d1d7daed969e718c88ed4f1bc8"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.717630 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f2wnx" event={"ID":"c9ee6e01-c73e-499a-b626-ac73939d9af3","Type":"ContainerStarted","Data":"b197c75f602b147a406017fa90c064010a3cec89594f3f7db0af4c5a7e62aaf4"} Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.762153 4869 scope.go:117] "RemoveContainer" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791209 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791300 4869 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791334 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791357 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktbr7\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791392 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791686 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791733 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.791767 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config\") pod \"10a66559-191f-464a-8095-efb79cc5b29f\" (UID: \"10a66559-191f-464a-8095-efb79cc5b29f\") " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.793497 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.800081 4869 scope.go:117] "RemoveContainer" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.800419 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7" (OuterVolumeSpecName: "kube-api-access-ktbr7") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). 
InnerVolumeSpecName "kube-api-access-ktbr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.800508 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config" (OuterVolumeSpecName: "config") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.800704 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.800889 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out" (OuterVolumeSpecName: "config-out") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.804563 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.823792 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9". PluginName "kubernetes.io/csi", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.829677 4869 scope.go:117] "RemoveContainer" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.835594 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config" (OuterVolumeSpecName: "web-config") pod "10a66559-191f-464a-8095-efb79cc5b29f" (UID: "10a66559-191f-464a-8095-efb79cc5b29f"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.854355 4869 scope.go:117] "RemoveContainer" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" Sep 29 14:00:31 crc kubenswrapper[4869]: E0929 14:00:31.857243 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": container with ID starting with 0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623 not found: ID does not exist" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.857332 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623"} err="failed to get container status \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": rpc error: code = NotFound desc = could not find container \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": container with ID starting with 0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.857363 4869 scope.go:117] "RemoveContainer" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" Sep 29 14:00:31 crc kubenswrapper[4869]: E0929 14:00:31.858385 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": container with ID starting with bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8 not found: ID does not exist" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.858412 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8"} err="failed to get container status \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": rpc error: code = NotFound desc = could not find container \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": container with ID starting with bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.858429 4869 scope.go:117] "RemoveContainer" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" Sep 29 14:00:31 crc kubenswrapper[4869]: E0929 14:00:31.858903 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": container with ID starting with 0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d not found: ID does not exist" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.858938 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d"} err="failed to get container status \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": rpc error: code = NotFound desc = could not 
find container \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": container with ID starting with 0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.858957 4869 scope.go:117] "RemoveContainer" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" Sep 29 14:00:31 crc kubenswrapper[4869]: E0929 14:00:31.859224 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": container with ID starting with 1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af not found: ID does not exist" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.859312 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af"} err="failed to get container status \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": rpc error: code = NotFound desc = could not find container \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": container with ID starting with 1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.859383 4869 scope.go:117] "RemoveContainer" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.859942 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623"} err="failed to get container status \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": rpc error: code = NotFound desc = could not find container \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": container with ID starting with 0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.859986 4869 scope.go:117] "RemoveContainer" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.860251 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8"} err="failed to get container status \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": rpc error: code = NotFound desc = could not find container \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": container with ID starting with bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.860264 4869 scope.go:117] "RemoveContainer" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.860445 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d"} err="failed to get container status \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": rpc error: code = NotFound desc = could not 
find container \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": container with ID starting with 0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.860458 4869 scope.go:117] "RemoveContainer" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861248 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af"} err="failed to get container status \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": rpc error: code = NotFound desc = could not find container \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": container with ID starting with 1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861271 4869 scope.go:117] "RemoveContainer" containerID="0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861517 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623"} err="failed to get container status \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": rpc error: code = NotFound desc = could not find container \"0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623\": container with ID starting with 0ab1cd73f12db5dea81c8f73122ba85bfad39a0ece54352e514ec8b715b21623 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861534 4869 scope.go:117] "RemoveContainer" containerID="bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861776 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8"} err="failed to get container status \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": rpc error: code = NotFound desc = could not find container \"bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8\": container with ID starting with bea58bb78222a9e1c4ec7920c6638dd2f357eccd9533d7cb72717eaf915086b8 not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.861814 4869 scope.go:117] "RemoveContainer" containerID="0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.862013 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d"} err="failed to get container status \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": rpc error: code = NotFound desc = could not find container \"0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d\": container with ID starting with 0d40ef0baed0874e2f7ae58a181645d9b812b7fb3becb96fec977f6d7818535d not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.862035 4869 scope.go:117] "RemoveContainer" containerID="1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.862295 4869 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af"} err="failed to get container status \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": rpc error: code = NotFound desc = could not find container \"1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af\": container with ID starting with 1df60f92c3ea4ecd02b28647b8da015516a0f58b5bbada8b298d37ce4ead17af not found: ID does not exist" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894263 4869 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/10a66559-191f-464a-8095-efb79cc5b29f-config-out\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894293 4869 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894304 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktbr7\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-kube-api-access-ktbr7\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894315 4869 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/10a66559-191f-464a-8095-efb79cc5b29f-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894354 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") on node \"crc\" " Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894364 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894375 4869 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/10a66559-191f-464a-8095-efb79cc5b29f-web-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.894384 4869 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/10a66559-191f-464a-8095-efb79cc5b29f-tls-assets\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.963995 4869 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.964356 4869 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9") on node "crc"
Sep 29 14:00:31 crc kubenswrapper[4869]: I0929 14:00:31.996755 4869 reconciler_common.go:293] "Volume detached for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") on node \"crc\" DevicePath \"\""
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.072820 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.091748 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.105945 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Sep 29 14:00:32 crc kubenswrapper[4869]: E0929 14:00:32.106334 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c342e61d-0f2e-4e2c-98a9-c58397596a90" containerName="ovn-config"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106361 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c342e61d-0f2e-4e2c-98a9-c58397596a90" containerName="ovn-config"
Sep 29 14:00:32 crc kubenswrapper[4869]: E0929 14:00:32.106383 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="init-config-reloader"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106389 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="init-config-reloader"
Sep 29 14:00:32 crc kubenswrapper[4869]: E0929 14:00:32.106410 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="thanos-sidecar"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106417 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="thanos-sidecar"
Sep 29 14:00:32 crc kubenswrapper[4869]: E0929 14:00:32.106433 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="prometheus"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106439 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="prometheus"
Sep 29 14:00:32 crc kubenswrapper[4869]: E0929 14:00:32.106448 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="config-reloader"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106454 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="config-reloader"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106640 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="prometheus"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106649 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="thanos-sidecar"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106663 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="c342e61d-0f2e-4e2c-98a9-c58397596a90" containerName="ovn-config"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.106672 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="10a66559-191f-464a-8095-efb79cc5b29f" containerName="config-reloader"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.123812 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.129248 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.129589 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.129803 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-wtzth"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.129998 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.130155 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.142879 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.143075 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.177729 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.261635 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10a66559-191f-464a-8095-efb79cc5b29f" path="/var/lib/kubelet/pods/10a66559-191f-464a-8095-efb79cc5b29f/volumes"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.264430 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c342e61d-0f2e-4e2c-98a9-c58397596a90" path="/var/lib/kubelet/pods/c342e61d-0f2e-4e2c-98a9-c58397596a90/volumes"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.305764 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bxhw\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.305835 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.305906 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.305973 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306005 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306032 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306069 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306121 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306160 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306195 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.306239 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408050 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408129 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408153 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408201 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408263 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bxhw\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408286 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408323 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408361 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408381 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408400 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.408425 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.409959 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.412034 4869 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.412158 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/279150bdb70b2f663e8288477cfabe3c1abac14428cf8cff87f747a1669c049c/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.413351 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.414118 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.414993 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.415457 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.416505 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.425012 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.439247 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0"
Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.439759 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bxhw\" (UniqueName:
\"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.439953 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.460951 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.492765 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.733532 4869 generic.go:334] "Generic (PLEG): container finished" podID="de807e17-2ffa-4679-9156-1905ebf6c3e6" containerID="255dd11c37ea2aaf037ec13f7e55e3c9c705fc87b3e9bfe27c738c04fdf17a2d" exitCode=0 Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.733852 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gn5ht" event={"ID":"de807e17-2ffa-4679-9156-1905ebf6c3e6","Type":"ContainerDied","Data":"255dd11c37ea2aaf037ec13f7e55e3c9c705fc87b3e9bfe27c738c04fdf17a2d"} Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.735970 4869 generic.go:334] "Generic (PLEG): container finished" podID="df9ac455-60af-4f59-9eb1-8dcb8569a21c" containerID="204b34762bcc1c2859973455721efec89dcf694fc8f5ba4610d6eb234eb0eda0" exitCode=0 Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.736119 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-52dht" event={"ID":"df9ac455-60af-4f59-9eb1-8dcb8569a21c","Type":"ContainerDied","Data":"204b34762bcc1c2859973455721efec89dcf694fc8f5ba4610d6eb234eb0eda0"} Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.971558 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:00:32 crc kubenswrapper[4869]: I0929 14:00:32.983254 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:32 crc kubenswrapper[4869]: W0929 14:00:32.983428 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52556bdb_2237_4c52_980f_9c3fd051804e.slice/crio-07718be136405e5ef8abe6039b492c979114dc3bc8971ff1d3c183dcad9e09d9 WatchSource:0}: Error finding container 07718be136405e5ef8abe6039b492c979114dc3bc8971ff1d3c183dcad9e09d9: Status 404 returned error can't find the container with id 07718be136405e5ef8abe6039b492c979114dc3bc8971ff1d3c183dcad9e09d9 Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.137925 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s46pv\" (UniqueName: \"kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv\") pod \"c9ee6e01-c73e-499a-b626-ac73939d9af3\" (UID: \"c9ee6e01-c73e-499a-b626-ac73939d9af3\") " Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.141485 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv" (OuterVolumeSpecName: "kube-api-access-s46pv") pod "c9ee6e01-c73e-499a-b626-ac73939d9af3" (UID: "c9ee6e01-c73e-499a-b626-ac73939d9af3"). InnerVolumeSpecName "kube-api-access-s46pv". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.239741 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s46pv\" (UniqueName: \"kubernetes.io/projected/c9ee6e01-c73e-499a-b626-ac73939d9af3-kube-api-access-s46pv\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.752190 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerStarted","Data":"07718be136405e5ef8abe6039b492c979114dc3bc8971ff1d3c183dcad9e09d9"} Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.754592 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f2wnx" Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.754589 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f2wnx" event={"ID":"c9ee6e01-c73e-499a-b626-ac73939d9af3","Type":"ContainerDied","Data":"b197c75f602b147a406017fa90c064010a3cec89594f3f7db0af4c5a7e62aaf4"} Sep 29 14:00:33 crc kubenswrapper[4869]: I0929 14:00:33.755527 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b197c75f602b147a406017fa90c064010a3cec89594f3f7db0af4c5a7e62aaf4" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.371729 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.381068 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-52dht" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.503546 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkmhp\" (UniqueName: \"kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp\") pod \"de807e17-2ffa-4679-9156-1905ebf6c3e6\" (UID: \"de807e17-2ffa-4679-9156-1905ebf6c3e6\") " Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.503602 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndsd7\" (UniqueName: \"kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7\") pod \"df9ac455-60af-4f59-9eb1-8dcb8569a21c\" (UID: \"df9ac455-60af-4f59-9eb1-8dcb8569a21c\") " Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.510132 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp" (OuterVolumeSpecName: "kube-api-access-wkmhp") pod "de807e17-2ffa-4679-9156-1905ebf6c3e6" (UID: "de807e17-2ffa-4679-9156-1905ebf6c3e6"). InnerVolumeSpecName "kube-api-access-wkmhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.511091 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7" (OuterVolumeSpecName: "kube-api-access-ndsd7") pod "df9ac455-60af-4f59-9eb1-8dcb8569a21c" (UID: "df9ac455-60af-4f59-9eb1-8dcb8569a21c"). InnerVolumeSpecName "kube-api-access-ndsd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.605449 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkmhp\" (UniqueName: \"kubernetes.io/projected/de807e17-2ffa-4679-9156-1905ebf6c3e6-kube-api-access-wkmhp\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.605492 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndsd7\" (UniqueName: \"kubernetes.io/projected/df9ac455-60af-4f59-9eb1-8dcb8569a21c-kube-api-access-ndsd7\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.786552 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gn5ht" event={"ID":"de807e17-2ffa-4679-9156-1905ebf6c3e6","Type":"ContainerDied","Data":"ca25ca703226e33cca5066d45c4c70d7c36336b9f317836bf216426006c6c0a1"} Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.786595 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca25ca703226e33cca5066d45c4c70d7c36336b9f317836bf216426006c6c0a1" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.786675 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gn5ht" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.796023 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-52dht" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.796060 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-52dht" event={"ID":"df9ac455-60af-4f59-9eb1-8dcb8569a21c","Type":"ContainerDied","Data":"e91cb439415e27ff5ee598131174ddd078231c02fbfb92d590c5ea1451267d89"} Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.796092 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e91cb439415e27ff5ee598131174ddd078231c02fbfb92d590c5ea1451267d89" Sep 29 14:00:36 crc kubenswrapper[4869]: I0929 14:00:36.798089 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerStarted","Data":"3f2782b835abbcf7e039732da011606c2d99078cf4074344b37e4de565416777"} Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.830421 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qghzx" event={"ID":"bf1f0c5e-7cbc-4d6f-afc4-241de854da60","Type":"ContainerStarted","Data":"894d47b410b578430459cc03e5040ad2a4bd1b4b07eff321296a3170c8de05bc"} Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.832050 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hdwlg" event={"ID":"dae249ed-58ac-41dc-b84c-59ba9f80d003","Type":"ContainerStarted","Data":"3202ffa8eca92c287d35f416b8c3855cf995f57086a845695a6ff1526d0177f0"} Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.833523 4869 generic.go:334] "Generic (PLEG): container finished" podID="62ecf715-d247-4d7d-baa0-2c929a73a141" containerID="b5ea25fc052f09c2b7433621953e6df568bb6a63e11f83d9ff6e0e5305c29467" exitCode=0 Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.833570 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-npjf6" event={"ID":"62ecf715-d247-4d7d-baa0-2c929a73a141","Type":"ContainerDied","Data":"b5ea25fc052f09c2b7433621953e6df568bb6a63e11f83d9ff6e0e5305c29467"} Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.847404 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-qghzx" podStartSLOduration=3.408896262 podStartE2EDuration="12.847388325s" podCreationTimestamp="2025-09-29 14:00:28 +0000 UTC" firstStartedPulling="2025-09-29 14:00:31.212386579 +0000 UTC m=+1157.653030889" lastFinishedPulling="2025-09-29 14:00:40.650878632 +0000 UTC m=+1167.091522952" observedRunningTime="2025-09-29 14:00:40.845847854 +0000 UTC m=+1167.286492204" watchObservedRunningTime="2025-09-29 14:00:40.847388325 +0000 UTC m=+1167.288032645" Sep 29 14:00:40 crc kubenswrapper[4869]: I0929 14:00:40.867736 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-hdwlg" podStartSLOduration=5.154710293 podStartE2EDuration="13.867717643s" podCreationTimestamp="2025-09-29 14:00:27 +0000 UTC" firstStartedPulling="2025-09-29 14:00:31.443508629 +0000 UTC m=+1157.884152939" lastFinishedPulling="2025-09-29 14:00:40.156515969 +0000 UTC m=+1166.597160289" observedRunningTime="2025-09-29 14:00:40.867352254 +0000 UTC m=+1167.307996574" watchObservedRunningTime="2025-09-29 14:00:40.867717643 +0000 UTC m=+1167.308361963" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.518852 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.630551 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5rpn\" (UniqueName: \"kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn\") pod \"62ecf715-d247-4d7d-baa0-2c929a73a141\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.630675 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle\") pod \"62ecf715-d247-4d7d-baa0-2c929a73a141\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.630718 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data\") pod \"62ecf715-d247-4d7d-baa0-2c929a73a141\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.631033 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data\") pod \"62ecf715-d247-4d7d-baa0-2c929a73a141\" (UID: \"62ecf715-d247-4d7d-baa0-2c929a73a141\") " Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.646854 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn" (OuterVolumeSpecName: "kube-api-access-x5rpn") pod "62ecf715-d247-4d7d-baa0-2c929a73a141" (UID: "62ecf715-d247-4d7d-baa0-2c929a73a141"). InnerVolumeSpecName "kube-api-access-x5rpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.647807 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "62ecf715-d247-4d7d-baa0-2c929a73a141" (UID: "62ecf715-d247-4d7d-baa0-2c929a73a141"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.670405 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62ecf715-d247-4d7d-baa0-2c929a73a141" (UID: "62ecf715-d247-4d7d-baa0-2c929a73a141"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.681084 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data" (OuterVolumeSpecName: "config-data") pod "62ecf715-d247-4d7d-baa0-2c929a73a141" (UID: "62ecf715-d247-4d7d-baa0-2c929a73a141"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.733432 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.733464 4869 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.733474 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ecf715-d247-4d7d-baa0-2c929a73a141-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.733484 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5rpn\" (UniqueName: \"kubernetes.io/projected/62ecf715-d247-4d7d-baa0-2c929a73a141-kube-api-access-x5rpn\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.856798 4869 generic.go:334] "Generic (PLEG): container finished" podID="52556bdb-2237-4c52-980f-9c3fd051804e" containerID="3f2782b835abbcf7e039732da011606c2d99078cf4074344b37e4de565416777" exitCode=0 Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.856861 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerDied","Data":"3f2782b835abbcf7e039732da011606c2d99078cf4074344b37e4de565416777"} Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.858430 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-npjf6" event={"ID":"62ecf715-d247-4d7d-baa0-2c929a73a141","Type":"ContainerDied","Data":"8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579"} Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.858451 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e55ea4b1b725066d2259f17ef83160a5c09d59038f4fc376ed8ee4c817c3579" Sep 29 14:00:42 crc kubenswrapper[4869]: I0929 14:00:42.858526 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-npjf6" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.260039 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:43 crc kubenswrapper[4869]: E0929 14:00:43.260834 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de807e17-2ffa-4679-9156-1905ebf6c3e6" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.260851 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="de807e17-2ffa-4679-9156-1905ebf6c3e6" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: E0929 14:00:43.260869 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62ecf715-d247-4d7d-baa0-2c929a73a141" containerName="glance-db-sync" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.260877 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="62ecf715-d247-4d7d-baa0-2c929a73a141" containerName="glance-db-sync" Sep 29 14:00:43 crc kubenswrapper[4869]: E0929 14:00:43.260899 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df9ac455-60af-4f59-9eb1-8dcb8569a21c" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.260907 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="df9ac455-60af-4f59-9eb1-8dcb8569a21c" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: E0929 14:00:43.260937 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ee6e01-c73e-499a-b626-ac73939d9af3" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.260945 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ee6e01-c73e-499a-b626-ac73939d9af3" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.261145 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ee6e01-c73e-499a-b626-ac73939d9af3" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.261167 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="62ecf715-d247-4d7d-baa0-2c929a73a141" containerName="glance-db-sync" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.261183 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="df9ac455-60af-4f59-9eb1-8dcb8569a21c" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.261197 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="de807e17-2ffa-4679-9156-1905ebf6c3e6" containerName="mariadb-database-create" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.262341 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.282702 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.446789 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6vd4\" (UniqueName: \"kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.446858 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.447253 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.447451 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.447581 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.549597 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.549708 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.549772 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6vd4\" (UniqueName: \"kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.549806 4869 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.549882 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.551317 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.551353 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.551450 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.551493 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.569487 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6vd4\" (UniqueName: \"kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4\") pod \"dnsmasq-dns-55bb5bc7bf-hbx2n\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.579756 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:43 crc kubenswrapper[4869]: I0929 14:00:43.876205 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerStarted","Data":"2f81a502be902eb7059d41f360b8ca58b8ccbb8f3b5a58c0634ea00280c2d49c"} Sep 29 14:00:44 crc kubenswrapper[4869]: I0929 14:00:44.031338 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:44 crc kubenswrapper[4869]: W0929 14:00:44.034105 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd33f5d23_ad2b_47d7_90be_85d9105a6530.slice/crio-785ccd0724723ec190c695ebb6e5a221fa2c971f662750b5cd7acc746e3580ec WatchSource:0}: Error finding container 785ccd0724723ec190c695ebb6e5a221fa2c971f662750b5cd7acc746e3580ec: Status 404 returned error can't find the container with id 785ccd0724723ec190c695ebb6e5a221fa2c971f662750b5cd7acc746e3580ec Sep 29 14:00:44 crc kubenswrapper[4869]: I0929 14:00:44.884828 4869 generic.go:334] "Generic (PLEG): container finished" podID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerID="cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84" exitCode=0 Sep 29 14:00:44 crc kubenswrapper[4869]: I0929 14:00:44.884986 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" event={"ID":"d33f5d23-ad2b-47d7-90be-85d9105a6530","Type":"ContainerDied","Data":"cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84"} Sep 29 14:00:44 crc kubenswrapper[4869]: I0929 14:00:44.885221 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" event={"ID":"d33f5d23-ad2b-47d7-90be-85d9105a6530","Type":"ContainerStarted","Data":"785ccd0724723ec190c695ebb6e5a221fa2c971f662750b5cd7acc746e3580ec"} Sep 29 14:00:45 crc kubenswrapper[4869]: I0929 14:00:45.895158 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" event={"ID":"d33f5d23-ad2b-47d7-90be-85d9105a6530","Type":"ContainerStarted","Data":"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546"} Sep 29 14:00:45 crc kubenswrapper[4869]: I0929 14:00:45.895718 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:45 crc kubenswrapper[4869]: I0929 14:00:45.905501 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerStarted","Data":"bb2716c367616610ddbc81f82b6cd15b2c425a50813b1ce26ab4abba72b43649"} Sep 29 14:00:45 crc kubenswrapper[4869]: I0929 14:00:45.905541 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerStarted","Data":"4d42727421dde2c640e05125b24fa54f80f223c8ffffde6a4bd0648f8f03f89a"} Sep 29 14:00:45 crc kubenswrapper[4869]: I0929 14:00:45.946426 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" podStartSLOduration=2.946408314 podStartE2EDuration="2.946408314s" podCreationTimestamp="2025-09-29 14:00:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:45.944842753 +0000 
UTC m=+1172.385487093" watchObservedRunningTime="2025-09-29 14:00:45.946408314 +0000 UTC m=+1172.387052634" Sep 29 14:00:46 crc kubenswrapper[4869]: I0929 14:00:46.002047 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=14.002022851 podStartE2EDuration="14.002022851s" podCreationTimestamp="2025-09-29 14:00:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:45.990349217 +0000 UTC m=+1172.430993537" watchObservedRunningTime="2025-09-29 14:00:46.002022851 +0000 UTC m=+1172.442667171" Sep 29 14:00:46 crc kubenswrapper[4869]: I0929 14:00:46.915670 4869 generic.go:334] "Generic (PLEG): container finished" podID="bf1f0c5e-7cbc-4d6f-afc4-241de854da60" containerID="894d47b410b578430459cc03e5040ad2a4bd1b4b07eff321296a3170c8de05bc" exitCode=0 Sep 29 14:00:46 crc kubenswrapper[4869]: I0929 14:00:46.915750 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qghzx" event={"ID":"bf1f0c5e-7cbc-4d6f-afc4-241de854da60","Type":"ContainerDied","Data":"894d47b410b578430459cc03e5040ad2a4bd1b4b07eff321296a3170c8de05bc"} Sep 29 14:00:46 crc kubenswrapper[4869]: I0929 14:00:46.917737 4869 generic.go:334] "Generic (PLEG): container finished" podID="dae249ed-58ac-41dc-b84c-59ba9f80d003" containerID="3202ffa8eca92c287d35f416b8c3855cf995f57086a845695a6ff1526d0177f0" exitCode=0 Sep 29 14:00:46 crc kubenswrapper[4869]: I0929 14:00:46.917824 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hdwlg" event={"ID":"dae249ed-58ac-41dc-b84c-59ba9f80d003","Type":"ContainerDied","Data":"3202ffa8eca92c287d35f416b8c3855cf995f57086a845695a6ff1526d0177f0"} Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.406958 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-1f59-account-create-jczsh"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.408215 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.409809 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.418867 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1f59-account-create-jczsh"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.495055 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.495110 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.501799 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.530475 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzqg4\" (UniqueName: \"kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4\") pod \"cinder-1f59-account-create-jczsh\" (UID: \"d6da504f-b624-44ac-8baa-8e144014ecea\") " pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.605957 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-de12-account-create-xmpfv"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.607398 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.609163 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.614551 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-de12-account-create-xmpfv"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.631834 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzqg4\" (UniqueName: \"kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4\") pod \"cinder-1f59-account-create-jczsh\" (UID: \"d6da504f-b624-44ac-8baa-8e144014ecea\") " pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.649499 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzqg4\" (UniqueName: \"kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4\") pod \"cinder-1f59-account-create-jczsh\" (UID: \"d6da504f-b624-44ac-8baa-8e144014ecea\") " pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.733703 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7mds\" (UniqueName: \"kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds\") pod \"barbican-de12-account-create-xmpfv\" (UID: \"8bc1ab68-07de-40a8-bae9-49548389842e\") " pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.734224 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.815026 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b9f8-account-create-lzj29"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.816378 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.818395 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.835862 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7mds\" (UniqueName: \"kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds\") pod \"barbican-de12-account-create-xmpfv\" (UID: \"8bc1ab68-07de-40a8-bae9-49548389842e\") " pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.838217 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b9f8-account-create-lzj29"] Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.857078 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7mds\" (UniqueName: \"kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds\") pod \"barbican-de12-account-create-xmpfv\" (UID: \"8bc1ab68-07de-40a8-bae9-49548389842e\") " pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.927019 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.932195 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Sep 29 14:00:47 crc kubenswrapper[4869]: I0929 14:00:47.939262 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86ccr\" (UniqueName: \"kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr\") pod \"neutron-b9f8-account-create-lzj29\" (UID: \"38c7f4e3-4845-42a1-8496-18a55de398e6\") " pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.043273 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86ccr\" (UniqueName: \"kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr\") pod \"neutron-b9f8-account-create-lzj29\" (UID: \"38c7f4e3-4845-42a1-8496-18a55de398e6\") " pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.074804 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86ccr\" (UniqueName: \"kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr\") pod \"neutron-b9f8-account-create-lzj29\" (UID: \"38c7f4e3-4845-42a1-8496-18a55de398e6\") " pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.220844 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.516884 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.521976 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1f59-account-create-jczsh"] Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.539150 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.656694 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle\") pod \"dae249ed-58ac-41dc-b84c-59ba9f80d003\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.656769 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle\") pod \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.656903 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data\") pod \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.656972 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hl7bx\" (UniqueName: \"kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx\") pod \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.657001 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl95v\" (UniqueName: \"kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v\") pod \"dae249ed-58ac-41dc-b84c-59ba9f80d003\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.657048 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data\") pod \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\" (UID: \"bf1f0c5e-7cbc-4d6f-afc4-241de854da60\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.657091 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data\") pod \"dae249ed-58ac-41dc-b84c-59ba9f80d003\" (UID: \"dae249ed-58ac-41dc-b84c-59ba9f80d003\") " Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.661342 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx" (OuterVolumeSpecName: "kube-api-access-hl7bx") pod "bf1f0c5e-7cbc-4d6f-afc4-241de854da60" (UID: "bf1f0c5e-7cbc-4d6f-afc4-241de854da60"). InnerVolumeSpecName "kube-api-access-hl7bx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.663795 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "bf1f0c5e-7cbc-4d6f-afc4-241de854da60" (UID: "bf1f0c5e-7cbc-4d6f-afc4-241de854da60"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.666044 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v" (OuterVolumeSpecName: "kube-api-access-xl95v") pod "dae249ed-58ac-41dc-b84c-59ba9f80d003" (UID: "dae249ed-58ac-41dc-b84c-59ba9f80d003"). InnerVolumeSpecName "kube-api-access-xl95v". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.704922 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dae249ed-58ac-41dc-b84c-59ba9f80d003" (UID: "dae249ed-58ac-41dc-b84c-59ba9f80d003"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.708519 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-de12-account-create-xmpfv"] Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.718835 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf1f0c5e-7cbc-4d6f-afc4-241de854da60" (UID: "bf1f0c5e-7cbc-4d6f-afc4-241de854da60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.740720 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data" (OuterVolumeSpecName: "config-data") pod "bf1f0c5e-7cbc-4d6f-afc4-241de854da60" (UID: "bf1f0c5e-7cbc-4d6f-afc4-241de854da60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.741027 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data" (OuterVolumeSpecName: "config-data") pod "dae249ed-58ac-41dc-b84c-59ba9f80d003" (UID: "dae249ed-58ac-41dc-b84c-59ba9f80d003"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759328 4869 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759362 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hl7bx\" (UniqueName: \"kubernetes.io/projected/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-kube-api-access-hl7bx\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759373 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl95v\" (UniqueName: \"kubernetes.io/projected/dae249ed-58ac-41dc-b84c-59ba9f80d003-kube-api-access-xl95v\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759382 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759391 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759400 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dae249ed-58ac-41dc-b84c-59ba9f80d003-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.759408 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf1f0c5e-7cbc-4d6f-afc4-241de854da60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.832823 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b9f8-account-create-lzj29"] Sep 29 14:00:48 crc kubenswrapper[4869]: W0929 14:00:48.835482 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38c7f4e3_4845_42a1_8496_18a55de398e6.slice/crio-2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b WatchSource:0}: Error finding container 2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b: Status 404 returned error can't find the container with id 2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.949513 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b9f8-account-create-lzj29" event={"ID":"38c7f4e3-4845-42a1-8496-18a55de398e6","Type":"ContainerStarted","Data":"2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.951092 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-de12-account-create-xmpfv" event={"ID":"8bc1ab68-07de-40a8-bae9-49548389842e","Type":"ContainerStarted","Data":"3ba39e23aec9a46a93a5591d57347a6fcd1a597fcd73b5fc5d525969f971d11a"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.961208 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1f59-account-create-jczsh" 
event={"ID":"d6da504f-b624-44ac-8baa-8e144014ecea","Type":"ContainerDied","Data":"15ac72790e3fa861235f9575970b77d6f7273d7b04834e83edf5a00f5fd9382c"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.961365 4869 generic.go:334] "Generic (PLEG): container finished" podID="d6da504f-b624-44ac-8baa-8e144014ecea" containerID="15ac72790e3fa861235f9575970b77d6f7273d7b04834e83edf5a00f5fd9382c" exitCode=0 Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.961528 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1f59-account-create-jczsh" event={"ID":"d6da504f-b624-44ac-8baa-8e144014ecea","Type":"ContainerStarted","Data":"a5e03387f146b0d9b3f1579e7f0b54a7cc0a0fbd50b672145ea6a0d95891c820"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.966814 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qghzx" event={"ID":"bf1f0c5e-7cbc-4d6f-afc4-241de854da60","Type":"ContainerDied","Data":"be5cf8363bf6316273034e92e897cd1f64c23977a2951eb0a4e8891ac30771e2"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.966857 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qghzx" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.966857 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be5cf8363bf6316273034e92e897cd1f64c23977a2951eb0a4e8891ac30771e2" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.969432 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hdwlg" Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.969449 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hdwlg" event={"ID":"dae249ed-58ac-41dc-b84c-59ba9f80d003","Type":"ContainerDied","Data":"f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24"} Sep 29 14:00:48 crc kubenswrapper[4869]: I0929 14:00:48.969529 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f34eefb30d8d3c327a3ed8d879dfb42b6cb5d245f7634d4fd1525f4657ae8e24" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.134964 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.135203 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="dnsmasq-dns" containerID="cri-o://72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546" gracePeriod=10 Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.185133 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:49 crc kubenswrapper[4869]: E0929 14:00:49.191666 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf1f0c5e-7cbc-4d6f-afc4-241de854da60" containerName="watcher-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.191715 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf1f0c5e-7cbc-4d6f-afc4-241de854da60" containerName="watcher-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: E0929 14:00:49.191742 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dae249ed-58ac-41dc-b84c-59ba9f80d003" containerName="keystone-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.191750 4869 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dae249ed-58ac-41dc-b84c-59ba9f80d003" containerName="keystone-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.257620 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf1f0c5e-7cbc-4d6f-afc4-241de854da60" containerName="watcher-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.257717 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="dae249ed-58ac-41dc-b84c-59ba9f80d003" containerName="keystone-db-sync" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.265173 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.274499 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.301711 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-22z67"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.303335 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.313016 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.313652 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.339388 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-77f6b" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.339651 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.374908 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4slk\" (UniqueName: \"kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.374948 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.374975 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.374993 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375015 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375038 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375078 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375098 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375115 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375139 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.375175 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztdk9\" (UniqueName: \"kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.380784 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-22z67"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.416684 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.418390 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.422537 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-4smxq" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.422900 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.448854 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.471705 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.473884 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.476845 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.476903 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.476935 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.476963 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.476992 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477019 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477045 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data\") pod \"watcher-decision-engine-0\" (UID: 
\"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477080 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztdk9\" (UniqueName: \"kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477099 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvq45\" (UniqueName: \"kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477165 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4slk\" (UniqueName: \"kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477193 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477221 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477240 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477262 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477318 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.477354 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 
14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.478342 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.479178 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.484499 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.486139 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.487632 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.490906 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.490973 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.491568 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.492205 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.506642 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.508251 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.510105 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") 
" pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.515914 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.544488 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4slk\" (UniqueName: \"kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk\") pod \"keystone-bootstrap-22z67\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.551259 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.554261 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztdk9\" (UniqueName: \"kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9\") pod \"dnsmasq-dns-656cf9cdf-c6kqv\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590795 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590846 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590877 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590914 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvq45\" (UniqueName: \"kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590949 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.590975 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pmlf\" (UniqueName: 
\"kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591023 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591053 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591176 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpxgr\" (UniqueName: \"kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591209 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591235 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591254 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591282 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.591299 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.612301 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " 
pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.612403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.612581 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.612742 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.617454 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.621127 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvq45\" (UniqueName: \"kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45\") pod \"watcher-decision-engine-0\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.642473 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.658642 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.660698 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.662978 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.663122 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.674672 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.676260 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vxszp"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.677679 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.684465 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.684908 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pdcrv" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.685094 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.685263 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703735 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703777 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pmlf\" (UniqueName: \"kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703835 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703869 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpxgr\" (UniqueName: \"kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703899 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703925 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703954 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.703972 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs\") pod \"watcher-api-0\" (UID: 
\"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.704003 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.710574 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.710749 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.710832 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.714527 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.715172 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.715220 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vxszp"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.716529 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.729334 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pmlf\" (UniqueName: \"kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.731268 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpxgr\" (UniqueName: \"kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr\") pod \"watcher-api-0\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.735152 4869 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data\") pod \"watcher-applier-0\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.746991 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.748341 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.759052 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.760873 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.765300 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806436 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-245f4\" (UniqueName: \"kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806800 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806832 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806858 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806876 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806899 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806921 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806937 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.806956 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr55n\" (UniqueName: \"kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.807000 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.807025 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.807063 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.851041 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.874778 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909047 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfgcd\" (UniqueName: \"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909096 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909127 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909167 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909197 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909223 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909253 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909273 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909303 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909325 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909348 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909423 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr55n\" (UniqueName: \"kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909509 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909537 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909568 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909637 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909681 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-245f4\" (UniqueName: \"kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.909699 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.910175 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " 
pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.912029 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.916509 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.917146 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.922919 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.923998 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.925352 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.935108 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.935438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.936363 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-245f4\" (UniqueName: \"kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4\") pod \"placement-db-sync-vxszp\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.961906 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr55n\" (UniqueName: \"kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n\") pod \"ceilometer-0\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") 
" pod="openstack/ceilometer-0" Sep 29 14:00:49 crc kubenswrapper[4869]: I0929 14:00:49.994788 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.013288 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfgcd\" (UniqueName: \"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.013344 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.013410 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.013448 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.013506 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.015197 4869 generic.go:334] "Generic (PLEG): container finished" podID="38c7f4e3-4845-42a1-8496-18a55de398e6" containerID="eaf2463bab505fc2a57a29dce4f2cda19bc17e4f1582dae8866b0e748a217ecb" exitCode=0 Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.015304 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b9f8-account-create-lzj29" event={"ID":"38c7f4e3-4845-42a1-8496-18a55de398e6","Type":"ContainerDied","Data":"eaf2463bab505fc2a57a29dce4f2cda19bc17e4f1582dae8866b0e748a217ecb"} Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.015323 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.016929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.017246 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.017666 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.022384 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vxszp" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.057378 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfgcd\" (UniqueName: \"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd\") pod \"dnsmasq-dns-74f8fbbf5-nd8gs\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.060838 4869 generic.go:334] "Generic (PLEG): container finished" podID="8bc1ab68-07de-40a8-bae9-49548389842e" containerID="28f15fcd29f2e42dc3ace4082b33b77f840aaf12cd9d59c8540efffc609c7c37" exitCode=0 Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.060912 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-de12-account-create-xmpfv" event={"ID":"8bc1ab68-07de-40a8-bae9-49548389842e","Type":"ContainerDied","Data":"28f15fcd29f2e42dc3ace4082b33b77f840aaf12cd9d59c8540efffc609c7c37"} Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.086902 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.096826 4869 generic.go:334] "Generic (PLEG): container finished" podID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerID="72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546" exitCode=0 Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.097036 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" event={"ID":"d33f5d23-ad2b-47d7-90be-85d9105a6530","Type":"ContainerDied","Data":"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546"} Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.097079 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" event={"ID":"d33f5d23-ad2b-47d7-90be-85d9105a6530","Type":"ContainerDied","Data":"785ccd0724723ec190c695ebb6e5a221fa2c971f662750b5cd7acc746e3580ec"} Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.097101 4869 scope.go:117] "RemoveContainer" containerID="72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.152817 4869 scope.go:117] "RemoveContainer" containerID="cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.199372 4869 scope.go:117] "RemoveContainer" containerID="72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546" Sep 29 14:00:50 crc kubenswrapper[4869]: E0929 14:00:50.199914 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546\": container with ID starting with 72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546 not found: ID does not exist" containerID="72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.199962 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546"} err="failed to get container status \"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546\": rpc error: code = NotFound desc = could not find container \"72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546\": container with ID starting with 72bb5b5bf5edc64140ba245a41d011b3abf5593ffd492292d4e9ce95adb10546 not found: ID does not exist" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.199983 4869 scope.go:117] "RemoveContainer" containerID="cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84" Sep 29 14:00:50 crc kubenswrapper[4869]: E0929 14:00:50.200200 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84\": container with ID starting with cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84 not found: ID does not exist" containerID="cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.200226 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84"} err="failed to get container status \"cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84\": rpc error: code = NotFound 
desc = could not find container \"cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84\": container with ID starting with cac91d71ccdb07574a1cd02c7b2d8e1eb847c8626bc1ade37807c31a69bf7f84 not found: ID does not exist" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.218572 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6vd4\" (UniqueName: \"kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4\") pod \"d33f5d23-ad2b-47d7-90be-85d9105a6530\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.218736 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb\") pod \"d33f5d23-ad2b-47d7-90be-85d9105a6530\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.218816 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb\") pod \"d33f5d23-ad2b-47d7-90be-85d9105a6530\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.218849 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc\") pod \"d33f5d23-ad2b-47d7-90be-85d9105a6530\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.218883 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config\") pod \"d33f5d23-ad2b-47d7-90be-85d9105a6530\" (UID: \"d33f5d23-ad2b-47d7-90be-85d9105a6530\") " Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.234858 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4" (OuterVolumeSpecName: "kube-api-access-f6vd4") pod "d33f5d23-ad2b-47d7-90be-85d9105a6530" (UID: "d33f5d23-ad2b-47d7-90be-85d9105a6530"). InnerVolumeSpecName "kube-api-access-f6vd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.344543 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.347003 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6vd4\" (UniqueName: \"kubernetes.io/projected/d33f5d23-ad2b-47d7-90be-85d9105a6530-kube-api-access-f6vd4\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.354345 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-22z67"] Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.357414 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d33f5d23-ad2b-47d7-90be-85d9105a6530" (UID: "d33f5d23-ad2b-47d7-90be-85d9105a6530"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.363676 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d33f5d23-ad2b-47d7-90be-85d9105a6530" (UID: "d33f5d23-ad2b-47d7-90be-85d9105a6530"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.367171 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d33f5d23-ad2b-47d7-90be-85d9105a6530" (UID: "d33f5d23-ad2b-47d7-90be-85d9105a6530"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.405534 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config" (OuterVolumeSpecName: "config") pod "d33f5d23-ad2b-47d7-90be-85d9105a6530" (UID: "d33f5d23-ad2b-47d7-90be-85d9105a6530"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.448418 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.448450 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.448461 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.448471 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33f5d23-ad2b-47d7-90be-85d9105a6530-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.536481 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:50 crc kubenswrapper[4869]: W0929 14:00:50.542071 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3268714c_4568_4774_b047_a31ae449db77.slice/crio-e4a5f8cfa3724f463022f40050bda5fd346071c078bbf4bcde3a7d3fd098f9ab WatchSource:0}: Error finding container e4a5f8cfa3724f463022f40050bda5fd346071c078bbf4bcde3a7d3fd098f9ab: Status 404 returned error can't find the container with id e4a5f8cfa3724f463022f40050bda5fd346071c078bbf4bcde3a7d3fd098f9ab Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.561903 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.746590 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.754407 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 
14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.893906 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vxszp"] Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.928674 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:00:50 crc kubenswrapper[4869]: I0929 14:00:50.980236 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.063045 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzqg4\" (UniqueName: \"kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4\") pod \"d6da504f-b624-44ac-8baa-8e144014ecea\" (UID: \"d6da504f-b624-44ac-8baa-8e144014ecea\") " Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.081009 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4" (OuterVolumeSpecName: "kube-api-access-lzqg4") pod "d6da504f-b624-44ac-8baa-8e144014ecea" (UID: "d6da504f-b624-44ac-8baa-8e144014ecea"). InnerVolumeSpecName "kube-api-access-lzqg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.144757 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerStarted","Data":"d43f808d884cb1205827434d0b087254e432d8eabc53cb5c6bebba696311fd5d"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.158933 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.172502 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzqg4\" (UniqueName: \"kubernetes.io/projected/d6da504f-b624-44ac-8baa-8e144014ecea-kube-api-access-lzqg4\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.174526 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22z67" event={"ID":"7a20ab29-5716-440d-b31f-abc85628a444","Type":"ContainerStarted","Data":"4e9ab8c5cada21407e25ac2a575ceb4deaa58a7801d53893463db62fdcda333d"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.174564 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22z67" event={"ID":"7a20ab29-5716-440d-b31f-abc85628a444","Type":"ContainerStarted","Data":"1f149363ebdf61f8c045d4fa6d045e9082e58b82433eae577a83205577956273"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.182151 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bb5bc7bf-hbx2n" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.216140 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-22z67" podStartSLOduration=2.216121284 podStartE2EDuration="2.216121284s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:51.207764547 +0000 UTC m=+1177.648408867" watchObservedRunningTime="2025-09-29 14:00:51.216121284 +0000 UTC m=+1177.656765594" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.249925 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerStarted","Data":"b6b6b39b4a8d6f9ba02e9e7369c42a56c64d09b3c0e416590dec51e9d688ad8c"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.285906 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" event={"ID":"3268714c-4568-4774-b047-a31ae449db77","Type":"ContainerStarted","Data":"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.285955 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" event={"ID":"3268714c-4568-4774-b047-a31ae449db77","Type":"ContainerStarted","Data":"e4a5f8cfa3724f463022f40050bda5fd346071c078bbf4bcde3a7d3fd098f9ab"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.286099 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" podUID="3268714c-4568-4774-b047-a31ae449db77" containerName="init" containerID="cri-o://0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1" gracePeriod=10 Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.304050 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"50dde599-0ce1-482d-b6ab-402e7b9a9997","Type":"ContainerStarted","Data":"f18a6a84cb4a99f3ce944689c4d66e97f1406fd0d57988495d7c60e2ac46a0e9"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.308295 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9d009b88-eac6-4112-9cd5-aa7d9aed7811","Type":"ContainerStarted","Data":"f51961ea90995c6f6ffb51380582dba4b48c6cc116699d1c7db602acd6d17488"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.313733 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vxszp" event={"ID":"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc","Type":"ContainerStarted","Data":"c08d607a7343918bd1bec5d8535d09b1765cd2ae30948e9cb7fd388b8dd8ec94"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.316503 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.324542 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1f59-account-create-jczsh" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.324886 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1f59-account-create-jczsh" event={"ID":"d6da504f-b624-44ac-8baa-8e144014ecea","Type":"ContainerDied","Data":"a5e03387f146b0d9b3f1579e7f0b54a7cc0a0fbd50b672145ea6a0d95891c820"} Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.324920 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5e03387f146b0d9b3f1579e7f0b54a7cc0a0fbd50b672145ea6a0d95891c820" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.390804 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55bb5bc7bf-hbx2n"] Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.902223 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.912281 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:51 crc kubenswrapper[4869]: I0929 14:00:51.997323 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86ccr\" (UniqueName: \"kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr\") pod \"38c7f4e3-4845-42a1-8496-18a55de398e6\" (UID: \"38c7f4e3-4845-42a1-8496-18a55de398e6\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.016983 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr" (OuterVolumeSpecName: "kube-api-access-86ccr") pod "38c7f4e3-4845-42a1-8496-18a55de398e6" (UID: "38c7f4e3-4845-42a1-8496-18a55de398e6"). InnerVolumeSpecName "kube-api-access-86ccr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.116664 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86ccr\" (UniqueName: \"kubernetes.io/projected/38c7f4e3-4845-42a1-8496-18a55de398e6-kube-api-access-86ccr\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.120719 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.172938 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.180295 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.258500 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" path="/var/lib/kubelet/pods/d33f5d23-ad2b-47d7-90be-85d9105a6530/volumes" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.320468 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config\") pod \"3268714c-4568-4774-b047-a31ae449db77\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.320535 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc\") pod \"3268714c-4568-4774-b047-a31ae449db77\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.320714 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7mds\" (UniqueName: \"kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds\") pod \"8bc1ab68-07de-40a8-bae9-49548389842e\" (UID: \"8bc1ab68-07de-40a8-bae9-49548389842e\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.321216 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztdk9\" (UniqueName: \"kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9\") pod \"3268714c-4568-4774-b047-a31ae449db77\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.321334 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb\") pod \"3268714c-4568-4774-b047-a31ae449db77\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.321402 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb\") pod \"3268714c-4568-4774-b047-a31ae449db77\" (UID: \"3268714c-4568-4774-b047-a31ae449db77\") " Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.325146 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds" (OuterVolumeSpecName: "kube-api-access-m7mds") pod "8bc1ab68-07de-40a8-bae9-49548389842e" (UID: "8bc1ab68-07de-40a8-bae9-49548389842e"). InnerVolumeSpecName "kube-api-access-m7mds". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.325699 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9" (OuterVolumeSpecName: "kube-api-access-ztdk9") pod "3268714c-4568-4774-b047-a31ae449db77" (UID: "3268714c-4568-4774-b047-a31ae449db77"). InnerVolumeSpecName "kube-api-access-ztdk9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.350407 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3268714c-4568-4774-b047-a31ae449db77" (UID: "3268714c-4568-4774-b047-a31ae449db77"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.350756 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3268714c-4568-4774-b047-a31ae449db77" (UID: "3268714c-4568-4774-b047-a31ae449db77"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.352419 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3268714c-4568-4774-b047-a31ae449db77" (UID: "3268714c-4568-4774-b047-a31ae449db77"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.355885 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerStarted","Data":"c4c86734d7c47857b97ba1c98a53dc75465678279f0f316204adde0129e5e8f8"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.355938 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerStarted","Data":"4f747068691611864cbc0fbbe29a8ca927a3a92518ec2755b86f978f4b27ae24"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.356703 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.362807 4869 generic.go:334] "Generic (PLEG): container finished" podID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerID="160ce0cbd49c4afc8ac104db307b6866413b2ef183df86234eee3cd505d3901a" exitCode=0 Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.362868 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" event={"ID":"50f38cd8-6393-417f-93cb-9cc3b4e75e93","Type":"ContainerDied","Data":"160ce0cbd49c4afc8ac104db307b6866413b2ef183df86234eee3cd505d3901a"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.362896 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" event={"ID":"50f38cd8-6393-417f-93cb-9cc3b4e75e93","Type":"ContainerStarted","Data":"d948327b87c2c31fce490fdb52274cf7da090ccccc217d244307c70601453f06"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.365154 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config" (OuterVolumeSpecName: "config") pod "3268714c-4568-4774-b047-a31ae449db77" (UID: "3268714c-4568-4774-b047-a31ae449db77"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.368902 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b9f8-account-create-lzj29" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.369441 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b9f8-account-create-lzj29" event={"ID":"38c7f4e3-4845-42a1-8496-18a55de398e6","Type":"ContainerDied","Data":"2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.369469 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f874b166c2e84292598d411ce01cb7a405a87613217501d3c1cd0759680eb9b" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.380641 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.380603293 podStartE2EDuration="3.380603293s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:52.380199372 +0000 UTC m=+1178.820843702" watchObservedRunningTime="2025-09-29 14:00:52.380603293 +0000 UTC m=+1178.821247613" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.381423 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-de12-account-create-xmpfv" event={"ID":"8bc1ab68-07de-40a8-bae9-49548389842e","Type":"ContainerDied","Data":"3ba39e23aec9a46a93a5591d57347a6fcd1a597fcd73b5fc5d525969f971d11a"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.381440 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-de12-account-create-xmpfv" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.381452 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ba39e23aec9a46a93a5591d57347a6fcd1a597fcd73b5fc5d525969f971d11a" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.385747 4869 generic.go:334] "Generic (PLEG): container finished" podID="3268714c-4568-4774-b047-a31ae449db77" containerID="0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1" exitCode=0 Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.385844 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" event={"ID":"3268714c-4568-4774-b047-a31ae449db77","Type":"ContainerDied","Data":"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.385932 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" event={"ID":"3268714c-4568-4774-b047-a31ae449db77","Type":"ContainerDied","Data":"e4a5f8cfa3724f463022f40050bda5fd346071c078bbf4bcde3a7d3fd098f9ab"} Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.385867 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-656cf9cdf-c6kqv" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.385966 4869 scope.go:117] "RemoveContainer" containerID="0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.425808 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.425837 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.425910 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.426686 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7mds\" (UniqueName: \"kubernetes.io/projected/8bc1ab68-07de-40a8-bae9-49548389842e-kube-api-access-m7mds\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.426716 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztdk9\" (UniqueName: \"kubernetes.io/projected/3268714c-4568-4774-b047-a31ae449db77-kube-api-access-ztdk9\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.426728 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3268714c-4568-4774-b047-a31ae449db77-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.438911 4869 scope.go:117] "RemoveContainer" containerID="0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.446235 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1\": container with ID starting with 0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1 not found: ID does not exist" containerID="0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.446292 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1"} err="failed to get container status \"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1\": rpc error: code = NotFound desc = could not find container \"0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1\": container with ID starting with 0f3ad3d03612e83bb7b265e70ade574c07adb9cd8a1d97651d50ddcea8f74ed1 not found: ID does not exist" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.467506 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.482836 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-656cf9cdf-c6kqv"] Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769152 4869 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/cinder-db-sync-nmzfx"] Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769511 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc1ab68-07de-40a8-bae9-49548389842e" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769523 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc1ab68-07de-40a8-bae9-49548389842e" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769531 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="init" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769537 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="init" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769556 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6da504f-b624-44ac-8baa-8e144014ecea" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769565 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6da504f-b624-44ac-8baa-8e144014ecea" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769579 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="dnsmasq-dns" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769585 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="dnsmasq-dns" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769596 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3268714c-4568-4774-b047-a31ae449db77" containerName="init" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769602 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3268714c-4568-4774-b047-a31ae449db77" containerName="init" Sep 29 14:00:52 crc kubenswrapper[4869]: E0929 14:00:52.769636 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c7f4e3-4845-42a1-8496-18a55de398e6" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769642 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c7f4e3-4845-42a1-8496-18a55de398e6" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769792 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d33f5d23-ad2b-47d7-90be-85d9105a6530" containerName="dnsmasq-dns" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769807 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6da504f-b624-44ac-8baa-8e144014ecea" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769814 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c7f4e3-4845-42a1-8496-18a55de398e6" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769825 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc1ab68-07de-40a8-bae9-49548389842e" containerName="mariadb-account-create" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.769836 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="3268714c-4568-4774-b047-a31ae449db77" containerName="init" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.770417 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.772719 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.773088 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q2nz9" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.773238 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.782883 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-nmzfx"] Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836287 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836429 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836517 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8zq4\" (UniqueName: \"kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836548 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836594 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.836654 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.941753 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942106 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-l8zq4\" (UniqueName: \"kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942206 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942239 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942281 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942352 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.942450 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.948684 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.949941 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.950003 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.951774 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " 
pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:52 crc kubenswrapper[4869]: I0929 14:00:52.965213 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8zq4\" (UniqueName: \"kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4\") pod \"cinder-db-sync-nmzfx\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.095218 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.399297 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api-log" containerID="cri-o://4f747068691611864cbc0fbbe29a8ca927a3a92518ec2755b86f978f4b27ae24" gracePeriod=30 Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.399494 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" event={"ID":"50f38cd8-6393-417f-93cb-9cc3b4e75e93","Type":"ContainerStarted","Data":"24f208b6d5c8876051601a8ee01bee27fbb55b7f61e944307824f3b0761e7ef1"} Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.399522 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" containerID="cri-o://c4c86734d7c47857b97ba1c98a53dc75465678279f0f316204adde0129e5e8f8" gracePeriod=30 Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.401917 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.405904 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.148:9322/\": EOF" Sep 29 14:00:53 crc kubenswrapper[4869]: I0929 14:00:53.430314 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" podStartSLOduration=4.430296995 podStartE2EDuration="4.430296995s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:00:53.4278271 +0000 UTC m=+1179.868471420" watchObservedRunningTime="2025-09-29 14:00:53.430296995 +0000 UTC m=+1179.870941315" Sep 29 14:00:54 crc kubenswrapper[4869]: I0929 14:00:54.253112 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3268714c-4568-4774-b047-a31ae449db77" path="/var/lib/kubelet/pods/3268714c-4568-4774-b047-a31ae449db77/volumes" Sep 29 14:00:54 crc kubenswrapper[4869]: I0929 14:00:54.414928 4869 generic.go:334] "Generic (PLEG): container finished" podID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerID="4f747068691611864cbc0fbbe29a8ca927a3a92518ec2755b86f978f4b27ae24" exitCode=143 Sep 29 14:00:54 crc kubenswrapper[4869]: I0929 14:00:54.415098 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerDied","Data":"4f747068691611864cbc0fbbe29a8ca927a3a92518ec2755b86f978f4b27ae24"} Sep 29 14:00:54 crc kubenswrapper[4869]: I0929 14:00:54.852346 4869 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.144112 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.148:9322/\": read tcp 10.217.0.2:38710->10.217.0.148:9322: read: connection reset by peer" Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.144857 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.148:9322/\": dial tcp 10.217.0.148:9322: connect: connection refused" Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.432295 4869 generic.go:334] "Generic (PLEG): container finished" podID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerID="c4c86734d7c47857b97ba1c98a53dc75465678279f0f316204adde0129e5e8f8" exitCode=0 Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.432380 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerDied","Data":"c4c86734d7c47857b97ba1c98a53dc75465678279f0f316204adde0129e5e8f8"} Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.435033 4869 generic.go:334] "Generic (PLEG): container finished" podID="7a20ab29-5716-440d-b31f-abc85628a444" containerID="4e9ab8c5cada21407e25ac2a575ceb4deaa58a7801d53893463db62fdcda333d" exitCode=0 Sep 29 14:00:56 crc kubenswrapper[4869]: I0929 14:00:56.435062 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22z67" event={"ID":"7a20ab29-5716-440d-b31f-abc85628a444","Type":"ContainerDied","Data":"4e9ab8c5cada21407e25ac2a575ceb4deaa58a7801d53893463db62fdcda333d"} Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.874694 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-zl26d"] Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.876515 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.880726 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-965dl" Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.880874 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.887416 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-zl26d"] Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.966630 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pgkl\" (UniqueName: \"kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.967058 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:57 crc kubenswrapper[4869]: I0929 14:00:57.967204 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.068715 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.068781 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.068828 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pgkl\" (UniqueName: \"kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.076158 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.076368 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.084779 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pgkl\" (UniqueName: \"kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl\") pod \"barbican-db-sync-zl26d\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.201978 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-zl26d" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.226951 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-h5s6m"] Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.228950 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.232642 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rf6tc" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.232920 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.233161 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.261398 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-h5s6m"] Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.376868 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.377168 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2m8v\" (UniqueName: \"kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.377213 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.456226 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.457011 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22z67" event={"ID":"7a20ab29-5716-440d-b31f-abc85628a444","Type":"ContainerDied","Data":"1f149363ebdf61f8c045d4fa6d045e9082e58b82433eae577a83205577956273"} Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.457057 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f149363ebdf61f8c045d4fa6d045e9082e58b82433eae577a83205577956273" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.478552 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.478869 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2m8v\" (UniqueName: \"kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.478902 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.502303 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.505037 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2m8v\" (UniqueName: \"kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.506759 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config\") pod \"neutron-db-sync-h5s6m\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.581251 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.581312 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4slk\" (UniqueName: \"kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc 
kubenswrapper[4869]: I0929 14:00:58.581426 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.581479 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.581534 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.581751 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys\") pod \"7a20ab29-5716-440d-b31f-abc85628a444\" (UID: \"7a20ab29-5716-440d-b31f-abc85628a444\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.615488 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.618847 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk" (OuterVolumeSpecName: "kube-api-access-g4slk") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "kube-api-access-g4slk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.625555 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.632876 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts" (OuterVolumeSpecName: "scripts") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.687685 4869 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-credential-keys\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.687918 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.688000 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4slk\" (UniqueName: \"kubernetes.io/projected/7a20ab29-5716-440d-b31f-abc85628a444-kube-api-access-g4slk\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.688101 4869 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-fernet-keys\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.694231 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.740760 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.745851 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data" (OuterVolumeSpecName: "config-data") pod "7a20ab29-5716-440d-b31f-abc85628a444" (UID: "7a20ab29-5716-440d-b31f-abc85628a444"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.754596 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.790759 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle\") pod \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.791299 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca\") pod \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.791347 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs\") pod \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.791398 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data\") pod \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.791965 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpxgr\" (UniqueName: \"kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr\") pod \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\" (UID: \"0a3a19fe-a390-45e0-aa17-1531d6d24c7c\") " Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.792889 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs" (OuterVolumeSpecName: "logs") pod "0a3a19fe-a390-45e0-aa17-1531d6d24c7c" (UID: "0a3a19fe-a390-45e0-aa17-1531d6d24c7c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.793423 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.793442 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.793452 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a20ab29-5716-440d-b31f-abc85628a444-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.798224 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr" (OuterVolumeSpecName: "kube-api-access-fpxgr") pod "0a3a19fe-a390-45e0-aa17-1531d6d24c7c" (UID: "0a3a19fe-a390-45e0-aa17-1531d6d24c7c"). InnerVolumeSpecName "kube-api-access-fpxgr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.823000 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "0a3a19fe-a390-45e0-aa17-1531d6d24c7c" (UID: "0a3a19fe-a390-45e0-aa17-1531d6d24c7c"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.858734 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a3a19fe-a390-45e0-aa17-1531d6d24c7c" (UID: "0a3a19fe-a390-45e0-aa17-1531d6d24c7c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.883761 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data" (OuterVolumeSpecName: "config-data") pod "0a3a19fe-a390-45e0-aa17-1531d6d24c7c" (UID: "0a3a19fe-a390-45e0-aa17-1531d6d24c7c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.894925 4869 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.894967 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.894978 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpxgr\" (UniqueName: \"kubernetes.io/projected/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-kube-api-access-fpxgr\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.894989 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a3a19fe-a390-45e0-aa17-1531d6d24c7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.897557 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-zl26d"] Sep 29 14:00:58 crc kubenswrapper[4869]: I0929 14:00:58.907284 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-nmzfx"] Sep 29 14:00:58 crc kubenswrapper[4869]: W0929 14:00:58.915411 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0608e6b0_5ca1_4a7e_80d5_0286e50dbd1c.slice/crio-c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609 WatchSource:0}: Error finding container c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609: Status 404 returned error can't find the container with id c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609 Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.274496 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-h5s6m"] Sep 29 14:00:59 crc kubenswrapper[4869]: W0929 14:00:59.323971 
4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode99dfaa4_d2d7_411a_bea7_4a4768c31ee4.slice/crio-819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149 WatchSource:0}: Error finding container 819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149: Status 404 returned error can't find the container with id 819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149 Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.472943 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"50dde599-0ce1-482d-b6ab-402e7b9a9997","Type":"ContainerStarted","Data":"9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.499876 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"0a3a19fe-a390-45e0-aa17-1531d6d24c7c","Type":"ContainerDied","Data":"d43f808d884cb1205827434d0b087254e432d8eabc53cb5c6bebba696311fd5d"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.499917 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.499931 4869 scope.go:117] "RemoveContainer" containerID="c4c86734d7c47857b97ba1c98a53dc75465678279f0f316204adde0129e5e8f8" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.511170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9d009b88-eac6-4112-9cd5-aa7d9aed7811","Type":"ContainerStarted","Data":"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.521309 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=3.006673723 podStartE2EDuration="10.521286543s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="2025-09-29 14:00:50.761247059 +0000 UTC m=+1177.201891379" lastFinishedPulling="2025-09-29 14:00:58.275859879 +0000 UTC m=+1184.716504199" observedRunningTime="2025-09-29 14:00:59.492584776 +0000 UTC m=+1185.933229106" watchObservedRunningTime="2025-09-29 14:00:59.521286543 +0000 UTC m=+1185.961930863" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.526809 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerStarted","Data":"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.531200 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h5s6m" event={"ID":"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4","Type":"ContainerStarted","Data":"819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.538370 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-zl26d" event={"ID":"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c","Type":"ContainerStarted","Data":"c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.538364 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.810020087 podStartE2EDuration="10.538350147s" 
podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="2025-09-29 14:00:50.559359796 +0000 UTC m=+1177.000004116" lastFinishedPulling="2025-09-29 14:00:58.287689856 +0000 UTC m=+1184.728334176" observedRunningTime="2025-09-29 14:00:59.537999288 +0000 UTC m=+1185.978643608" watchObservedRunningTime="2025-09-29 14:00:59.538350147 +0000 UTC m=+1185.978994467" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.563101 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vxszp" event={"ID":"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc","Type":"ContainerStarted","Data":"1b4c042eb5b18c1f24bfde21733be358d8ea0a065d2648a2df936e27551f973b"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.572963 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nmzfx" event={"ID":"250a15d6-2b1f-4b59-9564-7c7240c9b84e","Type":"ContainerStarted","Data":"e22348f2c1ae2c5248fbbe81bf590f76cd7593196ea7bed1e4e14cad2e74562d"} Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.572979 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-22z67" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.579264 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.592294 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.613853 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:59 crc kubenswrapper[4869]: E0929 14:00:59.614361 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614381 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" Sep 29 14:00:59 crc kubenswrapper[4869]: E0929 14:00:59.614404 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a20ab29-5716-440d-b31f-abc85628a444" containerName="keystone-bootstrap" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614411 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a20ab29-5716-440d-b31f-abc85628a444" containerName="keystone-bootstrap" Sep 29 14:00:59 crc kubenswrapper[4869]: E0929 14:00:59.614427 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api-log" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614433 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api-log" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614648 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a20ab29-5716-440d-b31f-abc85628a444" containerName="keystone-bootstrap" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614673 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.614693 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" containerName="watcher-api-log" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.615871 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.622249 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.628883 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vxszp" podStartSLOduration=3.263353112 podStartE2EDuration="10.628861602s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="2025-09-29 14:00:50.941335505 +0000 UTC m=+1177.381979825" lastFinishedPulling="2025-09-29 14:00:58.306843995 +0000 UTC m=+1184.747488315" observedRunningTime="2025-09-29 14:00:59.602474496 +0000 UTC m=+1186.043118816" watchObservedRunningTime="2025-09-29 14:00:59.628861602 +0000 UTC m=+1186.069505922" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.646735 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.656593 4869 scope.go:117] "RemoveContainer" containerID="4f747068691611864cbc0fbbe29a8ca927a3a92518ec2755b86f978f4b27ae24" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.656804 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-22z67"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.672217 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-22z67"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.720842 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whhjq\" (UniqueName: \"kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.720931 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.721026 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.721066 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.721208 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.729194 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bn5lj"] Sep 29 14:00:59 
crc kubenswrapper[4869]: I0929 14:00:59.733329 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.737819 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.738071 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.738061 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-77f6b" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.738402 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.750189 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.772773 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bn5lj"] Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.812754 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.824092 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.824721 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.824914 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whhjq\" (UniqueName: \"kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825029 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825120 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825202 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qml2n\" (UniqueName: \"kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n\") pod 
\"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825421 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825545 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825632 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.825715 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.826407 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.827261 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.835648 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.836282 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.842438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.845484 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whhjq\" (UniqueName: 
\"kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq\") pod \"watcher-api-0\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " pod="openstack/watcher-api-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.876017 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.876071 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929101 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929182 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929234 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929273 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qml2n\" (UniqueName: \"kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929324 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.929347 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.933705 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.939354 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc 
kubenswrapper[4869]: I0929 14:00:59.940400 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.941529 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.943862 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.944620 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.949969 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qml2n\" (UniqueName: \"kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n\") pod \"keystone-bootstrap-bn5lj\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:00:59 crc kubenswrapper[4869]: I0929 14:00:59.966099 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.061820 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.268023 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a3a19fe-a390-45e0-aa17-1531d6d24c7c" path="/var/lib/kubelet/pods/0a3a19fe-a390-45e0-aa17-1531d6d24c7c/volumes" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.268854 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a20ab29-5716-440d-b31f-abc85628a444" path="/var/lib/kubelet/pods/7a20ab29-5716-440d-b31f-abc85628a444/volumes" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.347056 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.407944 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.408542 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="dnsmasq-dns" containerID="cri-o://e4cf54ec200f1345674425f82e0650cc96afed8ec9ca93aa67d312685afc2b8a" gracePeriod=10 Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.598548 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h5s6m" event={"ID":"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4","Type":"ContainerStarted","Data":"c1b5c190354c6081c48119f76f3d1a7d5e741635899a3177a3ca14daf830eec3"} Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.607095 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.629424 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-h5s6m" podStartSLOduration=2.6293984249999998 podStartE2EDuration="2.629398425s" podCreationTimestamp="2025-09-29 14:00:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:00.617962708 +0000 UTC m=+1187.058607028" watchObservedRunningTime="2025-09-29 14:01:00.629398425 +0000 UTC m=+1187.070042765" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.636907 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.668951 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.675569 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:00 crc kubenswrapper[4869]: I0929 14:01:00.721337 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:01 crc kubenswrapper[4869]: I0929 14:01:01.632353 4869 generic.go:334] "Generic (PLEG): container finished" podID="00d07511-b138-4075-8d8a-5b14a27917fd" containerID="e4cf54ec200f1345674425f82e0650cc96afed8ec9ca93aa67d312685afc2b8a" exitCode=0 Sep 29 14:01:01 crc kubenswrapper[4869]: I0929 14:01:01.633870 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" event={"ID":"00d07511-b138-4075-8d8a-5b14a27917fd","Type":"ContainerDied","Data":"e4cf54ec200f1345674425f82e0650cc96afed8ec9ca93aa67d312685afc2b8a"} 
Sep 29 14:01:02 crc kubenswrapper[4869]: I0929 14:01:02.641142 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" containerName="watcher-decision-engine" containerID="cri-o://dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0" gracePeriod=30 Sep 29 14:01:02 crc kubenswrapper[4869]: I0929 14:01:02.641391 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" containerID="cri-o://9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" gracePeriod=30 Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.070020 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.205869 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb\") pod \"00d07511-b138-4075-8d8a-5b14a27917fd\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.205943 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb\") pod \"00d07511-b138-4075-8d8a-5b14a27917fd\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.206006 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config\") pod \"00d07511-b138-4075-8d8a-5b14a27917fd\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.206054 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rvrx\" (UniqueName: \"kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx\") pod \"00d07511-b138-4075-8d8a-5b14a27917fd\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.206076 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc\") pod \"00d07511-b138-4075-8d8a-5b14a27917fd\" (UID: \"00d07511-b138-4075-8d8a-5b14a27917fd\") " Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.210450 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx" (OuterVolumeSpecName: "kube-api-access-7rvrx") pod "00d07511-b138-4075-8d8a-5b14a27917fd" (UID: "00d07511-b138-4075-8d8a-5b14a27917fd"). InnerVolumeSpecName "kube-api-access-7rvrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.251978 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config" (OuterVolumeSpecName: "config") pod "00d07511-b138-4075-8d8a-5b14a27917fd" (UID: "00d07511-b138-4075-8d8a-5b14a27917fd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.253821 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "00d07511-b138-4075-8d8a-5b14a27917fd" (UID: "00d07511-b138-4075-8d8a-5b14a27917fd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.253862 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "00d07511-b138-4075-8d8a-5b14a27917fd" (UID: "00d07511-b138-4075-8d8a-5b14a27917fd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.259474 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "00d07511-b138-4075-8d8a-5b14a27917fd" (UID: "00d07511-b138-4075-8d8a-5b14a27917fd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.277980 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bn5lj"] Sep 29 14:01:03 crc kubenswrapper[4869]: W0929 14:01:03.280138 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb77999d8_2492_4cde_8630_d0110a1884fb.slice/crio-e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991 WatchSource:0}: Error finding container e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991: Status 404 returned error can't find the container with id e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991 Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.308264 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.308307 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.308320 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.308334 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rvrx\" (UniqueName: \"kubernetes.io/projected/00d07511-b138-4075-8d8a-5b14a27917fd-kube-api-access-7rvrx\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.308347 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00d07511-b138-4075-8d8a-5b14a27917fd-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.390313 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:03 crc 
kubenswrapper[4869]: I0929 14:01:03.650454 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerStarted","Data":"da53aa15d4668f1bb6acaf7cbc304c147b73119d2fac6b72fbddc3f1fbd1c7a1"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.650721 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerStarted","Data":"f22f226ec738df54e907cf14e32681e7c18a79dd1fd54a7bb1adcb69724d69f3"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.652574 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" event={"ID":"00d07511-b138-4075-8d8a-5b14a27917fd","Type":"ContainerDied","Data":"7c30c213b809158afa950ac54014e7db1370f526c91aba8c0616d618c78f9ce9"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.652621 4869 scope.go:117] "RemoveContainer" containerID="e4cf54ec200f1345674425f82e0650cc96afed8ec9ca93aa67d312685afc2b8a" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.652647 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd9c84cb5-7fcrr" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.656859 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bn5lj" event={"ID":"b77999d8-2492-4cde-8630-d0110a1884fb","Type":"ContainerStarted","Data":"159c01f7798e101c42860c996158544b046ab41145230ab7ed9c0632fab9187d"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.656889 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bn5lj" event={"ID":"b77999d8-2492-4cde-8630-d0110a1884fb","Type":"ContainerStarted","Data":"e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.659737 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerStarted","Data":"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42"} Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.676735 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bn5lj" podStartSLOduration=4.676717262 podStartE2EDuration="4.676717262s" podCreationTimestamp="2025-09-29 14:00:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:03.674441433 +0000 UTC m=+1190.115085753" watchObservedRunningTime="2025-09-29 14:01:03.676717262 +0000 UTC m=+1190.117361582" Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.693240 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 14:01:03 crc kubenswrapper[4869]: I0929 14:01:03.701422 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd9c84cb5-7fcrr"] Sep 29 14:01:04 crc kubenswrapper[4869]: I0929 14:01:04.276359 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" path="/var/lib/kubelet/pods/00d07511-b138-4075-8d8a-5b14a27917fd/volumes" Sep 29 14:01:04 crc kubenswrapper[4869]: E0929 14:01:04.879403 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, 
stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:04 crc kubenswrapper[4869]: E0929 14:01:04.882839 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:04 crc kubenswrapper[4869]: E0929 14:01:04.885454 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:04 crc kubenswrapper[4869]: E0929 14:01:04.885515 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.267041 4869 scope.go:117] "RemoveContainer" containerID="d3ee13a0c408b25d95fa68140d328168a817eaa5069987899686955cbd20a002" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.569023 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.672396 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data\") pod \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.672695 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvq45\" (UniqueName: \"kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45\") pod \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.672728 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle\") pod \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.672751 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs\") pod \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.672787 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca\") pod \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\" (UID: \"9d009b88-eac6-4112-9cd5-aa7d9aed7811\") " Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.674725 4869 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs" (OuterVolumeSpecName: "logs") pod "9d009b88-eac6-4112-9cd5-aa7d9aed7811" (UID: "9d009b88-eac6-4112-9cd5-aa7d9aed7811"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.677915 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45" (OuterVolumeSpecName: "kube-api-access-pvq45") pod "9d009b88-eac6-4112-9cd5-aa7d9aed7811" (UID: "9d009b88-eac6-4112-9cd5-aa7d9aed7811"). InnerVolumeSpecName "kube-api-access-pvq45". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.702831 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerStarted","Data":"461769584b8901c869aa2daee5c9029de5164ff78f77a904e4750f00bfdd8c1f"} Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.703381 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.705410 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-zl26d" event={"ID":"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c","Type":"ContainerStarted","Data":"d3ffed8f08c55753d1820f91211c4143f452770337a6ff28eb5eacc04a5db8ea"} Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.706060 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.156:9322/\": dial tcp 10.217.0.156:9322: connect: connection refused" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.707970 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d009b88-eac6-4112-9cd5-aa7d9aed7811" (UID: "9d009b88-eac6-4112-9cd5-aa7d9aed7811"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.708948 4869 generic.go:334] "Generic (PLEG): container finished" podID="2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" containerID="1b4c042eb5b18c1f24bfde21733be358d8ea0a065d2648a2df936e27551f973b" exitCode=0 Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.708995 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vxszp" event={"ID":"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc","Type":"ContainerDied","Data":"1b4c042eb5b18c1f24bfde21733be358d8ea0a065d2648a2df936e27551f973b"} Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.711773 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "9d009b88-eac6-4112-9cd5-aa7d9aed7811" (UID: "9d009b88-eac6-4112-9cd5-aa7d9aed7811"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.722227 4869 generic.go:334] "Generic (PLEG): container finished" podID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" containerID="dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0" exitCode=1 Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.722283 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9d009b88-eac6-4112-9cd5-aa7d9aed7811","Type":"ContainerDied","Data":"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0"} Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.722314 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9d009b88-eac6-4112-9cd5-aa7d9aed7811","Type":"ContainerDied","Data":"f51961ea90995c6f6ffb51380582dba4b48c6cc116699d1c7db602acd6d17488"} Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.722332 4869 scope.go:117] "RemoveContainer" containerID="dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.723025 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.739125 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=7.739102731 podStartE2EDuration="7.739102731s" podCreationTimestamp="2025-09-29 14:00:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:06.725023984 +0000 UTC m=+1193.165668324" watchObservedRunningTime="2025-09-29 14:01:06.739102731 +0000 UTC m=+1193.179747051" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.776591 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvq45\" (UniqueName: \"kubernetes.io/projected/9d009b88-eac6-4112-9cd5-aa7d9aed7811-kube-api-access-pvq45\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.776646 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.776659 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d009b88-eac6-4112-9cd5-aa7d9aed7811-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.776675 4869 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.777554 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data" (OuterVolumeSpecName: "config-data") pod "9d009b88-eac6-4112-9cd5-aa7d9aed7811" (UID: "9d009b88-eac6-4112-9cd5-aa7d9aed7811"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.779438 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-zl26d" podStartSLOduration=2.374355621 podStartE2EDuration="9.77942317s" podCreationTimestamp="2025-09-29 14:00:57 +0000 UTC" firstStartedPulling="2025-09-29 14:00:58.921251491 +0000 UTC m=+1185.361895801" lastFinishedPulling="2025-09-29 14:01:06.32631903 +0000 UTC m=+1192.766963350" observedRunningTime="2025-09-29 14:01:06.773625189 +0000 UTC m=+1193.214269519" watchObservedRunningTime="2025-09-29 14:01:06.77942317 +0000 UTC m=+1193.220067480" Sep 29 14:01:06 crc kubenswrapper[4869]: I0929 14:01:06.878177 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d009b88-eac6-4112-9cd5-aa7d9aed7811-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.080502 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.091427 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.110969 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:07 crc kubenswrapper[4869]: E0929 14:01:07.111542 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="dnsmasq-dns" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.111555 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="dnsmasq-dns" Sep 29 14:01:07 crc kubenswrapper[4869]: E0929 14:01:07.111576 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" containerName="watcher-decision-engine" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.111582 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" containerName="watcher-decision-engine" Sep 29 14:01:07 crc kubenswrapper[4869]: E0929 14:01:07.111595 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="init" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.111601 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="init" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.111807 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" containerName="watcher-decision-engine" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.111834 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d07511-b138-4075-8d8a-5b14a27917fd" containerName="dnsmasq-dns" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.112860 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.117987 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.126092 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.184402 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z6l6\" (UniqueName: \"kubernetes.io/projected/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-kube-api-access-5z6l6\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.185121 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.185269 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.185303 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.185840 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-logs\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.288487 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.288989 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.289021 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " 
pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.289278 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-logs\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.289896 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-logs\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.290026 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z6l6\" (UniqueName: \"kubernetes.io/projected/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-kube-api-access-5z6l6\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.297493 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.300009 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.308283 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z6l6\" (UniqueName: \"kubernetes.io/projected/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-kube-api-access-5z6l6\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.314920 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/103100a6-0dbb-481c-ba0e-4e7a2e5c38f6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6\") " pod="openstack/watcher-decision-engine-0" Sep 29 14:01:07 crc kubenswrapper[4869]: I0929 14:01:07.445378 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:08 crc kubenswrapper[4869]: I0929 14:01:08.252124 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d009b88-eac6-4112-9cd5-aa7d9aed7811" path="/var/lib/kubelet/pods/9d009b88-eac6-4112-9cd5-aa7d9aed7811/volumes" Sep 29 14:01:08 crc kubenswrapper[4869]: I0929 14:01:08.741286 4869 generic.go:334] "Generic (PLEG): container finished" podID="b77999d8-2492-4cde-8630-d0110a1884fb" containerID="159c01f7798e101c42860c996158544b046ab41145230ab7ed9c0632fab9187d" exitCode=0 Sep 29 14:01:08 crc kubenswrapper[4869]: I0929 14:01:08.741349 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bn5lj" event={"ID":"b77999d8-2492-4cde-8630-d0110a1884fb","Type":"ContainerDied","Data":"159c01f7798e101c42860c996158544b046ab41145230ab7ed9c0632fab9187d"} Sep 29 14:01:09 crc kubenswrapper[4869]: E0929 14:01:09.879521 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:09 crc kubenswrapper[4869]: E0929 14:01:09.883465 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:09 crc kubenswrapper[4869]: E0929 14:01:09.889630 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:09 crc kubenswrapper[4869]: E0929 14:01:09.889669 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:09 crc kubenswrapper[4869]: I0929 14:01:09.967751 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Sep 29 14:01:09 crc kubenswrapper[4869]: I0929 14:01:09.967908 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:01:10 crc kubenswrapper[4869]: I0929 14:01:10.019344 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Sep 29 14:01:11 crc kubenswrapper[4869]: I0929 14:01:11.009837 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.156:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:01:14 crc kubenswrapper[4869]: E0929 14:01:14.877724 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code 
-1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:14 crc kubenswrapper[4869]: E0929 14:01:14.879552 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:14 crc kubenswrapper[4869]: E0929 14:01:14.896184 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:14 crc kubenswrapper[4869]: E0929 14:01:14.896671 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.058776 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vxszp" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.184732 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data\") pod \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.184834 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-245f4\" (UniqueName: \"kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4\") pod \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.184869 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts\") pod \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.184925 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs\") pod \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.184998 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle\") pod \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\" (UID: \"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc\") " Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.185977 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs" (OuterVolumeSpecName: "logs") pod "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" (UID: "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.190918 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4" (OuterVolumeSpecName: "kube-api-access-245f4") pod "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" (UID: "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc"). InnerVolumeSpecName "kube-api-access-245f4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.198542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts" (OuterVolumeSpecName: "scripts") pod "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" (UID: "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.220816 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" (UID: "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.222797 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data" (OuterVolumeSpecName: "config-data") pod "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" (UID: "2c7daafc-ae3a-4161-8e8e-8e3651a1afcc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.287971 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.288004 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-245f4\" (UniqueName: \"kubernetes.io/projected/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-kube-api-access-245f4\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.288014 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.288023 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.288033 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.809490 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vxszp" event={"ID":"2c7daafc-ae3a-4161-8e8e-8e3651a1afcc","Type":"ContainerDied","Data":"c08d607a7343918bd1bec5d8535d09b1765cd2ae30948e9cb7fd388b8dd8ec94"} Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.809555 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vxszp" Sep 29 14:01:15 crc kubenswrapper[4869]: I0929 14:01:15.809578 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c08d607a7343918bd1bec5d8535d09b1765cd2ae30948e9cb7fd388b8dd8ec94" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.168054 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5cbb465496-fhs6h"] Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.169863 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" containerName="placement-db-sync" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.169980 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" containerName="placement-db-sync" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.170354 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" containerName="placement-db-sync" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.172118 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.174925 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.175255 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pdcrv" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.175422 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.175779 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.179372 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.186635 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5cbb465496-fhs6h"] Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.202383 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.202431 4869 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.203:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.202542 4869 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.203:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l8zq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-nmzfx_openstack(250a15d6-2b1f-4b59-9564-7c7240c9b84e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.203721 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-nmzfx" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309354 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhkqf\" (UniqueName: \"kubernetes.io/projected/718a89d5-d692-49e1-8e66-c647ca06125e-kube-api-access-nhkqf\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309456 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718a89d5-d692-49e1-8e66-c647ca06125e-logs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 
14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309499 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-combined-ca-bundle\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309565 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-config-data\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309597 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-public-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309638 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-internal-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.309663 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-scripts\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411131 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-internal-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411195 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-scripts\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411261 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhkqf\" (UniqueName: \"kubernetes.io/projected/718a89d5-d692-49e1-8e66-c647ca06125e-kube-api-access-nhkqf\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411301 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718a89d5-d692-49e1-8e66-c647ca06125e-logs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc 
kubenswrapper[4869]: I0929 14:01:16.411332 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-combined-ca-bundle\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411376 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-config-data\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411393 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-public-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.411794 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718a89d5-d692-49e1-8e66-c647ca06125e-logs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.419538 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-public-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.420671 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-combined-ca-bundle\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.421065 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-scripts\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.422670 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-config-data\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.424214 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/718a89d5-d692-49e1-8e66-c647ca06125e-internal-tls-certs\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.446295 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhkqf\" (UniqueName: 
\"kubernetes.io/projected/718a89d5-d692-49e1-8e66-c647ca06125e-kube-api-access-nhkqf\") pod \"placement-5cbb465496-fhs6h\" (UID: \"718a89d5-d692-49e1-8e66-c647ca06125e\") " pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.506233 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.819871 4869 generic.go:334] "Generic (PLEG): container finished" podID="0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" containerID="d3ffed8f08c55753d1820f91211c4143f452770337a6ff28eb5eacc04a5db8ea" exitCode=0 Sep 29 14:01:16 crc kubenswrapper[4869]: I0929 14:01:16.819935 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-zl26d" event={"ID":"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c","Type":"ContainerDied","Data":"d3ffed8f08c55753d1820f91211c4143f452770337a6ff28eb5eacc04a5db8ea"} Sep 29 14:01:16 crc kubenswrapper[4869]: E0929 14:01:16.821867 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.203:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-nmzfx" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.513647 4869 scope.go:117] "RemoveContainer" containerID="dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0" Sep 29 14:01:17 crc kubenswrapper[4869]: E0929 14:01:17.514287 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0\": container with ID starting with dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0 not found: ID does not exist" containerID="dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.514353 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0"} err="failed to get container status \"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0\": rpc error: code = NotFound desc = could not find container \"dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0\": container with ID starting with dc2cea4907ce2753c9283af0a78195ad22aa86de3588daf841809ffc854659a0 not found: ID does not exist" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.581896 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648519 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648697 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648818 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648874 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648894 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.648950 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qml2n\" (UniqueName: \"kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n\") pod \"b77999d8-2492-4cde-8630-d0110a1884fb\" (UID: \"b77999d8-2492-4cde-8630-d0110a1884fb\") " Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.654748 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.663408 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts" (OuterVolumeSpecName: "scripts") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.668816 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.671371 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n" (OuterVolumeSpecName: "kube-api-access-qml2n") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "kube-api-access-qml2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.678513 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.678834 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data" (OuterVolumeSpecName: "config-data") pod "b77999d8-2492-4cde-8630-d0110a1884fb" (UID: "b77999d8-2492-4cde-8630-d0110a1884fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751092 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751123 4869 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-fernet-keys\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751135 4869 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-credential-keys\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751145 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qml2n\" (UniqueName: \"kubernetes.io/projected/b77999d8-2492-4cde-8630-d0110a1884fb-kube-api-access-qml2n\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751157 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.751165 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b77999d8-2492-4cde-8630-d0110a1884fb-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.829660 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bn5lj" event={"ID":"b77999d8-2492-4cde-8630-d0110a1884fb","Type":"ContainerDied","Data":"e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991"} Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 14:01:17.829697 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e213dffffa5e902c4308e9751adb164ac025da34d74fdad002ca803e1fb9d991" Sep 29 14:01:17 crc kubenswrapper[4869]: I0929 
14:01:17.829751 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bn5lj" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.780489 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5b99b5b7f7-z8lvm"] Sep 29 14:01:18 crc kubenswrapper[4869]: E0929 14:01:18.781111 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b77999d8-2492-4cde-8630-d0110a1884fb" containerName="keystone-bootstrap" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.781131 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b77999d8-2492-4cde-8630-d0110a1884fb" containerName="keystone-bootstrap" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.781376 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="b77999d8-2492-4cde-8630-d0110a1884fb" containerName="keystone-bootstrap" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.782169 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.784677 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.785173 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.785195 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-77f6b" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.785357 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.785492 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.788500 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.805077 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b99b5b7f7-z8lvm"] Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874794 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-fernet-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874873 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-combined-ca-bundle\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874901 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-config-data\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874922 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-internal-tls-certs\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874950 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-public-tls-certs\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.874985 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-scripts\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.875035 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-credential-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.875065 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdxhk\" (UniqueName: \"kubernetes.io/projected/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-kube-api-access-gdxhk\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.976883 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdxhk\" (UniqueName: \"kubernetes.io/projected/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-kube-api-access-gdxhk\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977054 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-fernet-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977096 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-combined-ca-bundle\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977185 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-config-data\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977231 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-internal-tls-certs\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977259 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-public-tls-certs\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977322 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-scripts\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.977362 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-credential-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.982805 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-scripts\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.983899 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-combined-ca-bundle\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.984292 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-config-data\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.984540 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-fernet-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.984783 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-credential-keys\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.985247 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-internal-tls-certs\") pod 
\"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:18 crc kubenswrapper[4869]: I0929 14:01:18.996282 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-public-tls-certs\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.004563 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdxhk\" (UniqueName: \"kubernetes.io/projected/cef8fcc7-7d06-439c-8d41-1948f9fbda1b-kube-api-access-gdxhk\") pod \"keystone-5b99b5b7f7-z8lvm\" (UID: \"cef8fcc7-7d06-439c-8d41-1948f9fbda1b\") " pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.101457 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.127859 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-zl26d" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.282489 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pgkl\" (UniqueName: \"kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl\") pod \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.284056 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle\") pod \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.284328 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data\") pod \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\" (UID: \"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c\") " Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.289350 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl" (OuterVolumeSpecName: "kube-api-access-8pgkl") pod "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" (UID: "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c"). InnerVolumeSpecName "kube-api-access-8pgkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.289853 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" (UID: "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.316711 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" (UID: "0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.386800 4869 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.386841 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pgkl\" (UniqueName: \"kubernetes.io/projected/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-kube-api-access-8pgkl\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.386854 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.451957 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5cbb465496-fhs6h"] Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.524077 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.624155 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b99b5b7f7-z8lvm"] Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.851913 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cbb465496-fhs6h" event={"ID":"718a89d5-d692-49e1-8e66-c647ca06125e","Type":"ContainerStarted","Data":"67e566cc9a9572e9462049cf06f0fecc45f277769c1aeee8ebc2307baca206bb"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.851965 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cbb465496-fhs6h" event={"ID":"718a89d5-d692-49e1-8e66-c647ca06125e","Type":"ContainerStarted","Data":"28d84088a9e0f178b1b29ecd37678f303013cfa1f1aadda0f8eea4e3f09ad055"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.853754 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b99b5b7f7-z8lvm" event={"ID":"cef8fcc7-7d06-439c-8d41-1948f9fbda1b","Type":"ContainerStarted","Data":"5d72c926456fae61c0d8546a07c4559d7e654f65b73d66c2d3c8a33d282a2db2"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.856689 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerStarted","Data":"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.858918 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerStarted","Data":"db92a62ece11b20e66e8e13ca7abda8813118d94ce86939935067605e774e265"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.859055 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" 
event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerStarted","Data":"ca0ee4efc129728e4d5a048c1f843f5f4fe2f945b418971873669079cf497b0a"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.861261 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-zl26d" event={"ID":"0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c","Type":"ContainerDied","Data":"c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609"} Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.861310 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c951bf0371a52580341db838253cd2d0cd7fefa4ba4e8252c82dba749958e609" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.861366 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-zl26d" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.881641 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=12.881601361 podStartE2EDuration="12.881601361s" podCreationTimestamp="2025-09-29 14:01:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:19.874028414 +0000 UTC m=+1206.314672734" watchObservedRunningTime="2025-09-29 14:01:19.881601361 +0000 UTC m=+1206.322245681" Sep 29 14:01:19 crc kubenswrapper[4869]: E0929 14:01:19.883504 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:19 crc kubenswrapper[4869]: E0929 14:01:19.885400 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:19 crc kubenswrapper[4869]: E0929 14:01:19.886920 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:19 crc kubenswrapper[4869]: E0929 14:01:19.887006 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.978283 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Sep 29 14:01:19 crc kubenswrapper[4869]: I0929 14:01:19.986381 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.337240 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-68bc68cf7f-ms9sv"] Sep 29 14:01:20 crc kubenswrapper[4869]: E0929 14:01:20.338032 4869 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" containerName="barbican-db-sync" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.338062 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" containerName="barbican-db-sync" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.338242 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" containerName="barbican-db-sync" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.339488 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.342383 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.345683 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-965dl" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.345876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.392668 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-68bc68cf7f-ms9sv"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.445021 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/628780b7-eee4-4590-b398-ae564ac773a0-logs\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.445071 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data-custom\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.459855 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-combined-ca-bundle\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.460147 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.460203 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl2wj\" (UniqueName: \"kubernetes.io/projected/628780b7-eee4-4590-b398-ae564ac773a0-kube-api-access-gl2wj\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.519137 4869 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-688784644b-5fblt"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.531075 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.549460 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.562777 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/628780b7-eee4-4590-b398-ae564ac773a0-logs\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.562825 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data-custom\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.562864 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-combined-ca-bundle\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.562959 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.562992 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl2wj\" (UniqueName: \"kubernetes.io/projected/628780b7-eee4-4590-b398-ae564ac773a0-kube-api-access-gl2wj\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.565583 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/628780b7-eee4-4590-b398-ae564ac773a0-logs\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.573824 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.575385 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-combined-ca-bundle\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: 
\"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.585434 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/628780b7-eee4-4590-b398-ae564ac773a0-config-data-custom\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.590775 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-688784644b-5fblt"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.594837 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl2wj\" (UniqueName: \"kubernetes.io/projected/628780b7-eee4-4590-b398-ae564ac773a0-kube-api-access-gl2wj\") pod \"barbican-worker-68bc68cf7f-ms9sv\" (UID: \"628780b7-eee4-4590-b398-ae564ac773a0\") " pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.614233 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.615991 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.640192 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.650842 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.658570 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.662581 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.668926 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.668980 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgw48\" (UniqueName: \"kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669000 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63bb7554-9ece-4e74-a823-2d1e4489e72c-logs\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669025 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhdg4\" (UniqueName: \"kubernetes.io/projected/63bb7554-9ece-4e74-a823-2d1e4489e72c-kube-api-access-dhdg4\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669052 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669075 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669093 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669121 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" 
Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669142 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669182 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669222 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-combined-ca-bundle\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669282 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669328 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkf5z\" (UniqueName: \"kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669371 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data-custom\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.669396 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.673803 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.718092 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.772727 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773021 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkf5z\" (UniqueName: \"kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773068 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data-custom\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773090 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773133 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773170 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgw48\" (UniqueName: \"kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773190 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63bb7554-9ece-4e74-a823-2d1e4489e72c-logs\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773208 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhdg4\" (UniqueName: \"kubernetes.io/projected/63bb7554-9ece-4e74-a823-2d1e4489e72c-kube-api-access-dhdg4\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773228 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773244 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773262 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773307 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773322 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773356 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.773387 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-combined-ca-bundle\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.777154 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.778302 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.778553 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs\") pod \"barbican-api-74d588c6fd-2lr92\" 
(UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.778816 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63bb7554-9ece-4e74-a823-2d1e4489e72c-logs\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.781257 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.781778 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.781802 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.782572 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.786487 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.790214 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data-custom\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.798230 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-config-data\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.799133 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63bb7554-9ece-4e74-a823-2d1e4489e72c-combined-ca-bundle\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: 
\"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.800677 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhdg4\" (UniqueName: \"kubernetes.io/projected/63bb7554-9ece-4e74-a823-2d1e4489e72c-kube-api-access-dhdg4\") pod \"barbican-keystone-listener-688784644b-5fblt\" (UID: \"63bb7554-9ece-4e74-a823-2d1e4489e72c\") " pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.805193 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkf5z\" (UniqueName: \"kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z\") pod \"barbican-api-74d588c6fd-2lr92\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.823195 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgw48\" (UniqueName: \"kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48\") pod \"dnsmasq-dns-86dbb9557f-sjgn6\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.869014 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-688784644b-5fblt" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.887473 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cbb465496-fhs6h" event={"ID":"718a89d5-d692-49e1-8e66-c647ca06125e","Type":"ContainerStarted","Data":"9aa7234bd13121b2f5b267b42b096a5ec70d01cbce41ee672dcd6f5de177a943"} Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.888717 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.888742 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.904884 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b99b5b7f7-z8lvm" event={"ID":"cef8fcc7-7d06-439c-8d41-1948f9fbda1b","Type":"ContainerStarted","Data":"4eed98e81f61d94d73b7d43a5c79ade52f3760eeed8541fd9403c3c314bf1948"} Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.904948 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.917024 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5cbb465496-fhs6h" podStartSLOduration=4.917007091 podStartE2EDuration="4.917007091s" podCreationTimestamp="2025-09-29 14:01:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:20.914010833 +0000 UTC m=+1207.354655153" watchObservedRunningTime="2025-09-29 14:01:20.917007091 +0000 UTC m=+1207.357651411" Sep 29 14:01:20 crc kubenswrapper[4869]: I0929 14:01:20.994481 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.009168 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.279890 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5b99b5b7f7-z8lvm" podStartSLOduration=3.279870213 podStartE2EDuration="3.279870213s" podCreationTimestamp="2025-09-29 14:01:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:20.943700356 +0000 UTC m=+1207.384344676" watchObservedRunningTime="2025-09-29 14:01:21.279870213 +0000 UTC m=+1207.720514533" Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.282127 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-68bc68cf7f-ms9sv"] Sep 29 14:01:21 crc kubenswrapper[4869]: W0929 14:01:21.317080 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod628780b7_eee4_4590_b398_ae564ac773a0.slice/crio-13c32587ef6215256a87e740677ca3526cb1e7adaa38899b667f40c5813bb3d8 WatchSource:0}: Error finding container 13c32587ef6215256a87e740677ca3526cb1e7adaa38899b667f40c5813bb3d8: Status 404 returned error can't find the container with id 13c32587ef6215256a87e740677ca3526cb1e7adaa38899b667f40c5813bb3d8 Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.433084 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-688784644b-5fblt"] Sep 29 14:01:21 crc kubenswrapper[4869]: W0929 14:01:21.630421 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc026feba_a18f_4ced_9f53_a93aa8bca990.slice/crio-0a514b57d6fcbc5bdd23db749866ff562917afd3095ac26c9c6c0d19fe92fe4a WatchSource:0}: Error finding container 0a514b57d6fcbc5bdd23db749866ff562917afd3095ac26c9c6c0d19fe92fe4a: Status 404 returned error can't find the container with id 0a514b57d6fcbc5bdd23db749866ff562917afd3095ac26c9c6c0d19fe92fe4a Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.633967 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.731901 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.925230 4869 generic.go:334] "Generic (PLEG): container finished" podID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerID="4d2946f21ee5551fcbb04e6d11cc6429f5a777e36baa2592cbb21eb77d602c3c" exitCode=0 Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.925338 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" event={"ID":"c026feba-a18f-4ced-9f53-a93aa8bca990","Type":"ContainerDied","Data":"4d2946f21ee5551fcbb04e6d11cc6429f5a777e36baa2592cbb21eb77d602c3c"} Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.925626 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" event={"ID":"c026feba-a18f-4ced-9f53-a93aa8bca990","Type":"ContainerStarted","Data":"0a514b57d6fcbc5bdd23db749866ff562917afd3095ac26c9c6c0d19fe92fe4a"} Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.928123 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" 
event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerStarted","Data":"7af938c4f2a8968460970d78f7b7003bda4558e9f37d9e0f51740a673ee10595"} Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.928170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerStarted","Data":"5ceec85d139405299472a60150701c5e3780e508d10f1c6b5d2796b03ad8a18b"} Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.929789 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" event={"ID":"628780b7-eee4-4590-b398-ae564ac773a0","Type":"ContainerStarted","Data":"13c32587ef6215256a87e740677ca3526cb1e7adaa38899b667f40c5813bb3d8"} Sep 29 14:01:21 crc kubenswrapper[4869]: I0929 14:01:21.932035 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-688784644b-5fblt" event={"ID":"63bb7554-9ece-4e74-a823-2d1e4489e72c","Type":"ContainerStarted","Data":"9b1800e8a5c49ff8be7f41b132483bf905074e709bfe9da26c06486a61c5beb1"} Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.958420 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-645c66bc68-xrb58"] Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.961481 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.963565 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.963849 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.968755 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerStarted","Data":"6ec1366bdd397162419d2bc9765d94183846b57299d355daecf0ab34b8fb51dc"} Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.968943 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.969018 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.972142 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-645c66bc68-xrb58"] Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.976044 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" event={"ID":"c026feba-a18f-4ced-9f53-a93aa8bca990","Type":"ContainerStarted","Data":"596ccfc878f588b6fb914a509f2f165b75e883a3fe8b4898dead6c0e29b686c3"} Sep 29 14:01:22 crc kubenswrapper[4869]: I0929 14:01:22.977092 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.021695 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" podStartSLOduration=3.021678232 podStartE2EDuration="3.021678232s" podCreationTimestamp="2025-09-29 14:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-09-29 14:01:23.015249654 +0000 UTC m=+1209.455893974" watchObservedRunningTime="2025-09-29 14:01:23.021678232 +0000 UTC m=+1209.462322552" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.034874 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-74d588c6fd-2lr92" podStartSLOduration=3.034855024 podStartE2EDuration="3.034855024s" podCreationTimestamp="2025-09-29 14:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:23.032416171 +0000 UTC m=+1209.473060491" watchObservedRunningTime="2025-09-29 14:01:23.034855024 +0000 UTC m=+1209.475499344" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047414 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-combined-ca-bundle\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047579 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047603 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d34c4e-9d7e-4026-bb9f-0e356149a209-logs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047642 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-public-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047724 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data-custom\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047750 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-internal-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.047859 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6cwp\" (UniqueName: \"kubernetes.io/projected/d8d34c4e-9d7e-4026-bb9f-0e356149a209-kube-api-access-m6cwp\") pod \"barbican-api-645c66bc68-xrb58\" (UID: 
\"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148565 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148634 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d34c4e-9d7e-4026-bb9f-0e356149a209-logs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148655 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-public-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148709 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data-custom\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148728 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-internal-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148774 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6cwp\" (UniqueName: \"kubernetes.io/projected/d8d34c4e-9d7e-4026-bb9f-0e356149a209-kube-api-access-m6cwp\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.148816 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-combined-ca-bundle\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.151080 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d34c4e-9d7e-4026-bb9f-0e356149a209-logs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.160465 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-public-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " 
pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.164840 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data-custom\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.167361 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-config-data\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.168249 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-combined-ca-bundle\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.171037 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8d34c4e-9d7e-4026-bb9f-0e356149a209-internal-tls-certs\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.171234 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6cwp\" (UniqueName: \"kubernetes.io/projected/d8d34c4e-9d7e-4026-bb9f-0e356149a209-kube-api-access-m6cwp\") pod \"barbican-api-645c66bc68-xrb58\" (UID: \"d8d34c4e-9d7e-4026-bb9f-0e356149a209\") " pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:23 crc kubenswrapper[4869]: I0929 14:01:23.282918 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:24 crc kubenswrapper[4869]: I0929 14:01:24.002906 4869 generic.go:334] "Generic (PLEG): container finished" podID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" containerID="db92a62ece11b20e66e8e13ca7abda8813118d94ce86939935067605e774e265" exitCode=1 Sep 29 14:01:24 crc kubenswrapper[4869]: I0929 14:01:24.003120 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerDied","Data":"db92a62ece11b20e66e8e13ca7abda8813118d94ce86939935067605e774e265"} Sep 29 14:01:24 crc kubenswrapper[4869]: I0929 14:01:24.004906 4869 scope.go:117] "RemoveContainer" containerID="db92a62ece11b20e66e8e13ca7abda8813118d94ce86939935067605e774e265" Sep 29 14:01:24 crc kubenswrapper[4869]: I0929 14:01:24.356167 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-645c66bc68-xrb58"] Sep 29 14:01:24 crc kubenswrapper[4869]: E0929 14:01:24.887299 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:24 crc kubenswrapper[4869]: E0929 14:01:24.894707 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:24 crc kubenswrapper[4869]: E0929 14:01:24.896275 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:24 crc kubenswrapper[4869]: E0929 14:01:24.896338 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.023649 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-645c66bc68-xrb58" event={"ID":"d8d34c4e-9d7e-4026-bb9f-0e356149a209","Type":"ContainerStarted","Data":"b9786d621bd10af1e2e9f5b95f31188b5b10ed2046ef72234ac637d8de60a11d"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.023694 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-645c66bc68-xrb58" event={"ID":"d8d34c4e-9d7e-4026-bb9f-0e356149a209","Type":"ContainerStarted","Data":"bfe373c44600604c85febf379dd981e1b80d1c2794ab4b8927db3fd900a0bc94"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.023707 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-645c66bc68-xrb58" event={"ID":"d8d34c4e-9d7e-4026-bb9f-0e356149a209","Type":"ContainerStarted","Data":"a2ef86f3643143829d72d39b6511d02f6ddb31d789443590149c536ca60a6792"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.023827 4869 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.033003 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" event={"ID":"628780b7-eee4-4590-b398-ae564ac773a0","Type":"ContainerStarted","Data":"5c05c664132b227589aac862d8eff2fb6ebd669269fdc331a1dee8bb974be1da"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.033058 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" event={"ID":"628780b7-eee4-4590-b398-ae564ac773a0","Type":"ContainerStarted","Data":"90f2dd5ca2b99af1e17613a696a10dace0f0ec0fdf131f3a66fb27dc9a96605f"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.046096 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-645c66bc68-xrb58" podStartSLOduration=3.0460811740000002 podStartE2EDuration="3.046081174s" podCreationTimestamp="2025-09-29 14:01:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:25.04438781 +0000 UTC m=+1211.485032130" watchObservedRunningTime="2025-09-29 14:01:25.046081174 +0000 UTC m=+1211.486725494" Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.049416 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-688784644b-5fblt" event={"ID":"63bb7554-9ece-4e74-a823-2d1e4489e72c","Type":"ContainerStarted","Data":"8cf665d97388a27f03d638e85dffe5f91eec20eef6cb03fb2d54f5457ac8cd79"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.049455 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-688784644b-5fblt" event={"ID":"63bb7554-9ece-4e74-a823-2d1e4489e72c","Type":"ContainerStarted","Data":"7588d395d0a8d3e75b9a3a416e14393d9ac21d7d5f2ad16c80c6bfb20dc2f53b"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.074393 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerStarted","Data":"856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937"} Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.088313 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-68bc68cf7f-ms9sv" podStartSLOduration=2.513610183 podStartE2EDuration="5.088289702s" podCreationTimestamp="2025-09-29 14:01:20 +0000 UTC" firstStartedPulling="2025-09-29 14:01:21.320721516 +0000 UTC m=+1207.761365836" lastFinishedPulling="2025-09-29 14:01:23.895401025 +0000 UTC m=+1210.336045355" observedRunningTime="2025-09-29 14:01:25.064145444 +0000 UTC m=+1211.504789764" watchObservedRunningTime="2025-09-29 14:01:25.088289702 +0000 UTC m=+1211.528934022" Sep 29 14:01:25 crc kubenswrapper[4869]: I0929 14:01:25.144841 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-688784644b-5fblt" podStartSLOduration=2.702006164 podStartE2EDuration="5.144824493s" podCreationTimestamp="2025-09-29 14:01:20 +0000 UTC" firstStartedPulling="2025-09-29 14:01:21.450540703 +0000 UTC m=+1207.891185023" lastFinishedPulling="2025-09-29 14:01:23.893359032 +0000 UTC m=+1210.334003352" observedRunningTime="2025-09-29 14:01:25.100324005 +0000 UTC m=+1211.540968325" watchObservedRunningTime="2025-09-29 14:01:25.144824493 +0000 UTC 
m=+1211.585468813" Sep 29 14:01:26 crc kubenswrapper[4869]: I0929 14:01:26.082991 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:26 crc kubenswrapper[4869]: I0929 14:01:26.289088 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:26 crc kubenswrapper[4869]: I0929 14:01:26.289529 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" containerID="cri-o://da53aa15d4668f1bb6acaf7cbc304c147b73119d2fac6b72fbddc3f1fbd1c7a1" gracePeriod=30 Sep 29 14:01:26 crc kubenswrapper[4869]: I0929 14:01:26.290058 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" containerID="cri-o://461769584b8901c869aa2daee5c9029de5164ff78f77a904e4750f00bfdd8c1f" gracePeriod=30 Sep 29 14:01:27 crc kubenswrapper[4869]: I0929 14:01:27.101408 4869 generic.go:334] "Generic (PLEG): container finished" podID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerID="da53aa15d4668f1bb6acaf7cbc304c147b73119d2fac6b72fbddc3f1fbd1c7a1" exitCode=143 Sep 29 14:01:27 crc kubenswrapper[4869]: I0929 14:01:27.102843 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerDied","Data":"da53aa15d4668f1bb6acaf7cbc304c147b73119d2fac6b72fbddc3f1fbd1c7a1"} Sep 29 14:01:27 crc kubenswrapper[4869]: I0929 14:01:27.445879 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:27 crc kubenswrapper[4869]: I0929 14:01:27.495440 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:27 crc kubenswrapper[4869]: I0929 14:01:27.937084 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 14:01:28.113636 4869 generic.go:334] "Generic (PLEG): container finished" podID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" exitCode=1 Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 14:01:28.113745 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerDied","Data":"856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937"} Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 14:01:28.113781 4869 scope.go:117] "RemoveContainer" containerID="db92a62ece11b20e66e8e13ca7abda8813118d94ce86939935067605e774e265" Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 14:01:28.114459 4869 scope.go:117] "RemoveContainer" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" Sep 29 14:01:28 crc kubenswrapper[4869]: E0929 14:01:28.114685 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 
14:01:28.124595 4869 generic.go:334] "Generic (PLEG): container finished" podID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerID="461769584b8901c869aa2daee5c9029de5164ff78f77a904e4750f00bfdd8c1f" exitCode=0 Sep 29 14:01:28 crc kubenswrapper[4869]: I0929 14:01:28.124644 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerDied","Data":"461769584b8901c869aa2daee5c9029de5164ff78f77a904e4750f00bfdd8c1f"} Sep 29 14:01:29 crc kubenswrapper[4869]: I0929 14:01:29.135275 4869 scope.go:117] "RemoveContainer" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" Sep 29 14:01:29 crc kubenswrapper[4869]: E0929 14:01:29.136058 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:29 crc kubenswrapper[4869]: I0929 14:01:29.479320 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:29 crc kubenswrapper[4869]: E0929 14:01:29.878560 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:29 crc kubenswrapper[4869]: E0929 14:01:29.887807 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:29 crc kubenswrapper[4869]: E0929 14:01:29.889542 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Sep 29 14:01:29 crc kubenswrapper[4869]: E0929 14:01:29.889588 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:29 crc kubenswrapper[4869]: I0929 14:01:29.967375 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.156:9322/\": dial tcp 10.217.0.156:9322: connect: connection refused" Sep 29 14:01:29 crc kubenswrapper[4869]: I0929 14:01:29.967480 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.156:9322/\": dial tcp 10.217.0.156:9322: connect: connection 
refused" Sep 29 14:01:30 crc kubenswrapper[4869]: I0929 14:01:30.997795 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:01:31 crc kubenswrapper[4869]: I0929 14:01:31.082368 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:01:31 crc kubenswrapper[4869]: I0929 14:01:31.082967 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="dnsmasq-dns" containerID="cri-o://24f208b6d5c8876051601a8ee01bee27fbb55b7f61e944307824f3b0761e7ef1" gracePeriod=10 Sep 29 14:01:32 crc kubenswrapper[4869]: I0929 14:01:32.165413 4869 generic.go:334] "Generic (PLEG): container finished" podID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerID="24f208b6d5c8876051601a8ee01bee27fbb55b7f61e944307824f3b0761e7ef1" exitCode=0 Sep 29 14:01:32 crc kubenswrapper[4869]: I0929 14:01:32.165599 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" event={"ID":"50f38cd8-6393-417f-93cb-9cc3b4e75e93","Type":"ContainerDied","Data":"24f208b6d5c8876051601a8ee01bee27fbb55b7f61e944307824f3b0761e7ef1"} Sep 29 14:01:33 crc kubenswrapper[4869]: I0929 14:01:33.200932 4869 generic.go:334] "Generic (PLEG): container finished" podID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" exitCode=137 Sep 29 14:01:33 crc kubenswrapper[4869]: I0929 14:01:33.201039 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"50dde599-0ce1-482d-b6ab-402e7b9a9997","Type":"ContainerDied","Data":"9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110"} Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.214182 4869 generic.go:334] "Generic (PLEG): container finished" podID="e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" containerID="c1b5c190354c6081c48119f76f3d1a7d5e741635899a3177a3ca14daf830eec3" exitCode=0 Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.214417 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h5s6m" event={"ID":"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4","Type":"ContainerDied","Data":"c1b5c190354c6081c48119f76f3d1a7d5e741635899a3177a3ca14daf830eec3"} Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.669392 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.690065 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.771141 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.802647 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config\") pod \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.802702 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb\") pod \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.802734 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pmlf\" (UniqueName: \"kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf\") pod \"50dde599-0ce1-482d-b6ab-402e7b9a9997\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.802910 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb\") pod \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.802942 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data\") pod \"50dde599-0ce1-482d-b6ab-402e7b9a9997\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.803071 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc\") pod \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.803113 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle\") pod \"50dde599-0ce1-482d-b6ab-402e7b9a9997\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.803135 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs\") pod \"50dde599-0ce1-482d-b6ab-402e7b9a9997\" (UID: \"50dde599-0ce1-482d-b6ab-402e7b9a9997\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.803162 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfgcd\" (UniqueName: \"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd\") pod \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\" (UID: \"50f38cd8-6393-417f-93cb-9cc3b4e75e93\") " Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.815305 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd" (OuterVolumeSpecName: "kube-api-access-sfgcd") pod "50f38cd8-6393-417f-93cb-9cc3b4e75e93" (UID: "50f38cd8-6393-417f-93cb-9cc3b4e75e93"). InnerVolumeSpecName "kube-api-access-sfgcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.815948 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs" (OuterVolumeSpecName: "logs") pod "50dde599-0ce1-482d-b6ab-402e7b9a9997" (UID: "50dde599-0ce1-482d-b6ab-402e7b9a9997"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.840448 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf" (OuterVolumeSpecName: "kube-api-access-7pmlf") pod "50dde599-0ce1-482d-b6ab-402e7b9a9997" (UID: "50dde599-0ce1-482d-b6ab-402e7b9a9997"). InnerVolumeSpecName "kube-api-access-7pmlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.840504 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.895960 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "50f38cd8-6393-417f-93cb-9cc3b4e75e93" (UID: "50f38cd8-6393-417f-93cb-9cc3b4e75e93"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.901784 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50dde599-0ce1-482d-b6ab-402e7b9a9997" (UID: "50dde599-0ce1-482d-b6ab-402e7b9a9997"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.906330 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfgcd\" (UniqueName: \"kubernetes.io/projected/50f38cd8-6393-417f-93cb-9cc3b4e75e93-kube-api-access-sfgcd\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.906358 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pmlf\" (UniqueName: \"kubernetes.io/projected/50dde599-0ce1-482d-b6ab-402e7b9a9997-kube-api-access-7pmlf\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.906368 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.906378 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.906387 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50dde599-0ce1-482d-b6ab-402e7b9a9997-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.913728 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "50f38cd8-6393-417f-93cb-9cc3b4e75e93" (UID: "50f38cd8-6393-417f-93cb-9cc3b4e75e93"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.933848 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "50f38cd8-6393-417f-93cb-9cc3b4e75e93" (UID: "50f38cd8-6393-417f-93cb-9cc3b4e75e93"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.947856 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data" (OuterVolumeSpecName: "config-data") pod "50dde599-0ce1-482d-b6ab-402e7b9a9997" (UID: "50dde599-0ce1-482d-b6ab-402e7b9a9997"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:34 crc kubenswrapper[4869]: I0929 14:01:34.953988 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config" (OuterVolumeSpecName: "config") pod "50f38cd8-6393-417f-93cb-9cc3b4e75e93" (UID: "50f38cd8-6393-417f-93cb-9cc3b4e75e93"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.008442 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca\") pod \"cd994194-27ba-4c7b-920e-8f8a368dfe13\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.009075 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data\") pod \"cd994194-27ba-4c7b-920e-8f8a368dfe13\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.009146 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whhjq\" (UniqueName: \"kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq\") pod \"cd994194-27ba-4c7b-920e-8f8a368dfe13\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.009235 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle\") pod \"cd994194-27ba-4c7b-920e-8f8a368dfe13\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.009343 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs\") pod \"cd994194-27ba-4c7b-920e-8f8a368dfe13\" (UID: \"cd994194-27ba-4c7b-920e-8f8a368dfe13\") " Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.010151 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50dde599-0ce1-482d-b6ab-402e7b9a9997-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.010182 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.010196 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.010209 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50f38cd8-6393-417f-93cb-9cc3b4e75e93-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.011039 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs" (OuterVolumeSpecName: "logs") pod "cd994194-27ba-4c7b-920e-8f8a368dfe13" (UID: "cd994194-27ba-4c7b-920e-8f8a368dfe13"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.020935 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq" (OuterVolumeSpecName: "kube-api-access-whhjq") pod "cd994194-27ba-4c7b-920e-8f8a368dfe13" (UID: "cd994194-27ba-4c7b-920e-8f8a368dfe13"). InnerVolumeSpecName "kube-api-access-whhjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.045342 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd994194-27ba-4c7b-920e-8f8a368dfe13" (UID: "cd994194-27ba-4c7b-920e-8f8a368dfe13"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.062826 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "cd994194-27ba-4c7b-920e-8f8a368dfe13" (UID: "cd994194-27ba-4c7b-920e-8f8a368dfe13"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.080131 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-645c66bc68-xrb58" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.091279 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data" (OuterVolumeSpecName: "config-data") pod "cd994194-27ba-4c7b-920e-8f8a368dfe13" (UID: "cd994194-27ba-4c7b-920e-8f8a368dfe13"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.114770 4869 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.114813 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.114826 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whhjq\" (UniqueName: \"kubernetes.io/projected/cd994194-27ba-4c7b-920e-8f8a368dfe13-kube-api-access-whhjq\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.114841 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd994194-27ba-4c7b-920e-8f8a368dfe13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.114853 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd994194-27ba-4c7b-920e-8f8a368dfe13-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.168754 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.169417 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74d588c6fd-2lr92" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api-log" containerID="cri-o://7af938c4f2a8968460970d78f7b7003bda4558e9f37d9e0f51740a673ee10595" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.170127 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74d588c6fd-2lr92" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api" containerID="cri-o://6ec1366bdd397162419d2bc9765d94183846b57299d355daecf0ab34b8fb51dc" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.245988 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerStarted","Data":"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde"} Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.246239 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-central-agent" containerID="cri-o://11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.246568 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.246791 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="proxy-httpd" containerID="cri-o://132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.246963 4869 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="sg-core" containerID="cri-o://b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.247034 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-notification-agent" containerID="cri-o://ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42" gracePeriod=30 Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.288394 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"cd994194-27ba-4c7b-920e-8f8a368dfe13","Type":"ContainerDied","Data":"f22f226ec738df54e907cf14e32681e7c18a79dd1fd54a7bb1adcb69724d69f3"} Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.288459 4869 scope.go:117] "RemoveContainer" containerID="461769584b8901c869aa2daee5c9029de5164ff78f77a904e4750f00bfdd8c1f" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.288587 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.293530 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.589667124 podStartE2EDuration="46.293508948s" podCreationTimestamp="2025-09-29 14:00:49 +0000 UTC" firstStartedPulling="2025-09-29 14:00:50.956848268 +0000 UTC m=+1177.397492588" lastFinishedPulling="2025-09-29 14:01:34.660690092 +0000 UTC m=+1221.101334412" observedRunningTime="2025-09-29 14:01:35.281254779 +0000 UTC m=+1221.721899099" watchObservedRunningTime="2025-09-29 14:01:35.293508948 +0000 UTC m=+1221.734153268" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.336805 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.336871 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"50dde599-0ce1-482d-b6ab-402e7b9a9997","Type":"ContainerDied","Data":"f18a6a84cb4a99f3ce944689c4d66e97f1406fd0d57988495d7c60e2ac46a0e9"} Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.351771 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" event={"ID":"50f38cd8-6393-417f-93cb-9cc3b4e75e93","Type":"ContainerDied","Data":"d948327b87c2c31fce490fdb52274cf7da090ccccc217d244307c70601453f06"} Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.351859 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f8fbbf5-nd8gs" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.434686 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.463066 4869 scope.go:117] "RemoveContainer" containerID="da53aa15d4668f1bb6acaf7cbc304c147b73119d2fac6b72fbddc3f1fbd1c7a1" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.469261 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514014 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: E0929 14:01:35.514490 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="init" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514502 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="init" Sep 29 14:01:35 crc kubenswrapper[4869]: E0929 14:01:35.514532 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514540 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" Sep 29 14:01:35 crc kubenswrapper[4869]: E0929 14:01:35.514552 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514557 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:35 crc kubenswrapper[4869]: E0929 14:01:35.514570 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="dnsmasq-dns" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514576 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="dnsmasq-dns" Sep 29 14:01:35 crc kubenswrapper[4869]: E0929 14:01:35.514590 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514596 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514784 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api-log" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514798 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" containerName="dnsmasq-dns" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514812 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" containerName="watcher-applier" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.514823 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" containerName="watcher-api" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.515930 4869 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.523780 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.524001 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.528704 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.548930 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.658483 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.677018 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.684680 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.760706 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.760757 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-public-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.760782 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-config-data\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.760904 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.760942 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lh9z7\" (UniqueName: \"kubernetes.io/projected/95555888-1c52-4e91-ac6c-b85c38094784-kube-api-access-lh9z7\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.761097 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95555888-1c52-4e91-ac6c-b85c38094784-logs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.761197 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.762991 4869 scope.go:117] "RemoveContainer" containerID="9d86c8929f6c74a78fd79551de29472f0734790149342200ccb7910066db5110" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.795678 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f8fbbf5-nd8gs"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.841692 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.843140 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.855259 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.863698 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.863808 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.863838 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-public-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.863876 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-config-data\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.863975 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.864016 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lh9z7\" (UniqueName: \"kubernetes.io/projected/95555888-1c52-4e91-ac6c-b85c38094784-kube-api-access-lh9z7\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.864126 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95555888-1c52-4e91-ac6c-b85c38094784-logs\") pod 
\"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.865117 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95555888-1c52-4e91-ac6c-b85c38094784-logs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.880434 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.883781 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-public-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.888558 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-config-data\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.905703 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.906643 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95555888-1c52-4e91-ac6c-b85c38094784-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.909696 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.910256 4869 scope.go:117] "RemoveContainer" containerID="24f208b6d5c8876051601a8ee01bee27fbb55b7f61e944307824f3b0761e7ef1" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.922031 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lh9z7\" (UniqueName: \"kubernetes.io/projected/95555888-1c52-4e91-ac6c-b85c38094784-kube-api-access-lh9z7\") pod \"watcher-api-0\" (UID: \"95555888-1c52-4e91-ac6c-b85c38094784\") " pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.957446 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.968679 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-logs\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.968761 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.968789 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-config-data\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:35 crc kubenswrapper[4869]: I0929 14:01:35.968852 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7sck\" (UniqueName: \"kubernetes.io/projected/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-kube-api-access-k7sck\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.054028 4869 scope.go:117] "RemoveContainer" containerID="160ce0cbd49c4afc8ac104db307b6866413b2ef183df86234eee3cd505d3901a" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.070491 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.070549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-config-data\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.070685 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7sck\" (UniqueName: \"kubernetes.io/projected/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-kube-api-access-k7sck\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.070735 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-logs\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.071490 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-logs\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " 
pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.080329 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-config-data\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.082317 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.091141 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7sck\" (UniqueName: \"kubernetes.io/projected/2bccd1d9-e5f1-4608-ac93-e364aac95e6c-kube-api-access-k7sck\") pod \"watcher-applier-0\" (UID: \"2bccd1d9-e5f1-4608-ac93-e364aac95e6c\") " pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.194039 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.270576 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50dde599-0ce1-482d-b6ab-402e7b9a9997" path="/var/lib/kubelet/pods/50dde599-0ce1-482d-b6ab-402e7b9a9997/volumes" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.271157 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50f38cd8-6393-417f-93cb-9cc3b4e75e93" path="/var/lib/kubelet/pods/50f38cd8-6393-417f-93cb-9cc3b4e75e93/volumes" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.271932 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd994194-27ba-4c7b-920e-8f8a368dfe13" path="/var/lib/kubelet/pods/cd994194-27ba-4c7b-920e-8f8a368dfe13/volumes" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.281993 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle\") pod \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.282096 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2m8v\" (UniqueName: \"kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v\") pod \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.282258 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config\") pod \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\" (UID: \"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4\") " Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.288972 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v" (OuterVolumeSpecName: "kube-api-access-v2m8v") pod "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" (UID: "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4"). InnerVolumeSpecName "kube-api-access-v2m8v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.308100 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.361696 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config" (OuterVolumeSpecName: "config") pod "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" (UID: "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.397736 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2m8v\" (UniqueName: \"kubernetes.io/projected/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-kube-api-access-v2m8v\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.398029 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.439757 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" (UID: "e99dfaa4-d2d7-411a-bea7-4a4768c31ee4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.468690 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:36 crc kubenswrapper[4869]: E0929 14:01:36.469267 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" containerName="neutron-db-sync" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.469287 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" containerName="neutron-db-sync" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.469513 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" containerName="neutron-db-sync" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.470954 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.487367 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.500735 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.530572 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nmzfx" event={"ID":"250a15d6-2b1f-4b59-9564-7c7240c9b84e","Type":"ContainerStarted","Data":"c84fd0ff80087f86194e3d7952c169c32bbeb8f29e2fbefbaa287783c29b1a76"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.560847 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.571802 4869 generic.go:334] "Generic (PLEG): container finished" podID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerID="7af938c4f2a8968460970d78f7b7003bda4558e9f37d9e0f51740a673ee10595" exitCode=143 Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.571877 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerDied","Data":"7af938c4f2a8968460970d78f7b7003bda4558e9f37d9e0f51740a673ee10595"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.579687 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.580697 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.589669 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.596908 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-nmzfx" podStartSLOduration=8.985233863 podStartE2EDuration="44.596890829s" podCreationTimestamp="2025-09-29 14:00:52 +0000 UTC" firstStartedPulling="2025-09-29 14:00:58.956251272 +0000 UTC m=+1185.396895592" lastFinishedPulling="2025-09-29 14:01:34.567908238 +0000 UTC m=+1221.008552558" observedRunningTime="2025-09-29 14:01:36.567469814 +0000 UTC m=+1223.008114134" watchObservedRunningTime="2025-09-29 14:01:36.596890829 +0000 UTC m=+1223.037535149" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.606882 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.606945 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.607018 4869 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5spf\" (UniqueName: \"kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.607042 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.607066 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624096 4869 generic.go:334] "Generic (PLEG): container finished" podID="83e71a75-00ee-4764-83ce-1ca265589a29" containerID="132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde" exitCode=0 Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624128 4869 generic.go:334] "Generic (PLEG): container finished" podID="83e71a75-00ee-4764-83ce-1ca265589a29" containerID="b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a" exitCode=2 Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624137 4869 generic.go:334] "Generic (PLEG): container finished" podID="83e71a75-00ee-4764-83ce-1ca265589a29" containerID="11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420" exitCode=0 Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624192 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerDied","Data":"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624226 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerDied","Data":"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.624238 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerDied","Data":"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.638891 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h5s6m" event={"ID":"e99dfaa4-d2d7-411a-bea7-4a4768c31ee4","Type":"ContainerDied","Data":"819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149"} Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.638927 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="819de13cb82997f5e9bbb2d01854cc72bf85794343a8dfaf3e3c2a9bcd703149" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.638984 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-h5s6m" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709213 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709269 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709314 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709334 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709389 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5spf\" (UniqueName: \"kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709412 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709437 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709469 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709489 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc6l9\" (UniqueName: \"kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: 
\"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.709566 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.710388 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.711915 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.712836 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.713357 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.756112 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.756987 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5spf\" (UniqueName: \"kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf\") pod \"dnsmasq-dns-58d5d7b545-z5bkh\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.813490 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.813573 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.813602 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: 
\"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.813692 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.813710 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc6l9\" (UniqueName: \"kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.818685 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.821953 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.822299 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.822621 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.829174 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.833083 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc6l9\" (UniqueName: \"kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.856257 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config\") pod \"neutron-7bd86d5cfd-4t4bw\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:36 crc kubenswrapper[4869]: I0929 14:01:36.987398 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.058997 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.182243 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337244 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr55n\" (UniqueName: \"kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337577 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337627 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337679 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337784 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337855 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.337939 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.341651 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.341923 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.349018 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts" (OuterVolumeSpecName: "scripts") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.361693 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n" (OuterVolumeSpecName: "kube-api-access-tr55n") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "kube-api-access-tr55n". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.440499 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.440739 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") pod \"83e71a75-00ee-4764-83ce-1ca265589a29\" (UID: \"83e71a75-00ee-4764-83ce-1ca265589a29\") " Sep 29 14:01:37 crc kubenswrapper[4869]: W0929 14:01:37.440856 4869 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/83e71a75-00ee-4764-83ce-1ca265589a29/volumes/kubernetes.io~secret/sg-core-conf-yaml Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.440872 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.441297 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.441331 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.441344 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr55n\" (UniqueName: \"kubernetes.io/projected/83e71a75-00ee-4764-83ce-1ca265589a29-kube-api-access-tr55n\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.441353 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.441363 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83e71a75-00ee-4764-83ce-1ca265589a29-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.445534 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.446148 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.446210 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.446496 4869 scope.go:117] "RemoveContainer" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" Sep 29 14:01:37 crc kubenswrapper[4869]: E0929 14:01:37.446787 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.560632 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.562068 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.645999 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.659583 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" event={"ID":"8e4c81be-4e2d-4ae7-957c-bc6f257305e7","Type":"ContainerStarted","Data":"1db5c66cbcc34c699ba17a6319128017d0ad7777d48bccdc6a5657c05e85856a"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.661925 4869 generic.go:334] "Generic (PLEG): container finished" podID="83e71a75-00ee-4764-83ce-1ca265589a29" containerID="ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42" exitCode=0 Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.662017 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerDied","Data":"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.662073 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83e71a75-00ee-4764-83ce-1ca265589a29","Type":"ContainerDied","Data":"b6b6b39b4a8d6f9ba02e9e7369c42a56c64d09b3c0e416590dec51e9d688ad8c"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.662090 4869 scope.go:117] "RemoveContainer" containerID="132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.662266 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.675701 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data" (OuterVolumeSpecName: "config-data") pod "83e71a75-00ee-4764-83ce-1ca265589a29" (UID: "83e71a75-00ee-4764-83ce-1ca265589a29"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.687013 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2bccd1d9-e5f1-4608-ac93-e364aac95e6c","Type":"ContainerStarted","Data":"8e2dca96ff35b1b727916a30f5ad3280441437fc25481b0b0aadbff87084d302"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.687054 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2bccd1d9-e5f1-4608-ac93-e364aac95e6c","Type":"ContainerStarted","Data":"3ad98515258c2dd6db8fec386225a9756cfa37599be2f49f6c91c43fc0e1a408"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.718484 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=2.718460621 podStartE2EDuration="2.718460621s" podCreationTimestamp="2025-09-29 14:01:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:37.71380023 +0000 UTC m=+1224.154444560" watchObservedRunningTime="2025-09-29 14:01:37.718460621 +0000 UTC m=+1224.159104941" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.758561 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e71a75-00ee-4764-83ce-1ca265589a29-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.773478 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"95555888-1c52-4e91-ac6c-b85c38094784","Type":"ContainerStarted","Data":"9b83105af6f735b4ba5983fd7f780cd6366877e036ca70fdcb7d2d6890fa0fce"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.773518 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"95555888-1c52-4e91-ac6c-b85c38094784","Type":"ContainerStarted","Data":"09bf0fd4abf51733f0f6a4c0b903b73b3707480bb84bd014954f7aafa8f8e4a2"} Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.773533 4869 scope.go:117] "RemoveContainer" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.778819 4869 scope.go:117] "RemoveContainer" containerID="b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.862471 4869 scope.go:117] "RemoveContainer" containerID="ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.956033 4869 scope.go:117] "RemoveContainer" containerID="11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420" Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.991266 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:01:37 crc kubenswrapper[4869]: I0929 14:01:37.998808 4869 scope.go:117] "RemoveContainer" containerID="132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.002712 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde\": container with ID starting with 132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde not found: ID does not exist" 
containerID="132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.002755 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde"} err="failed to get container status \"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde\": rpc error: code = NotFound desc = could not find container \"132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde\": container with ID starting with 132342ba4b24fcbfbaf9bf3975b0e5d8656e379f9761566e146b904343417bde not found: ID does not exist" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.002809 4869 scope.go:117] "RemoveContainer" containerID="b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.011780 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a\": container with ID starting with b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a not found: ID does not exist" containerID="b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.011824 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a"} err="failed to get container status \"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a\": rpc error: code = NotFound desc = could not find container \"b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a\": container with ID starting with b48f1b1053f9d7a4bedd982b341f459374ac89058e495fba1979e5463e931c8a not found: ID does not exist" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.011854 4869 scope.go:117] "RemoveContainer" containerID="ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.024777 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42\": container with ID starting with ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42 not found: ID does not exist" containerID="ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.024828 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42"} err="failed to get container status \"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42\": rpc error: code = NotFound desc = could not find container \"ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42\": container with ID starting with ea8bc60d90d3a698d79d4704060828750bd2251a85891a438a814ebbe9752e42 not found: ID does not exist" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.024856 4869 scope.go:117] "RemoveContainer" containerID="11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.026761 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420\": container with ID starting with 11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420 not found: ID does not exist" containerID="11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.026784 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420"} err="failed to get container status \"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420\": rpc error: code = NotFound desc = could not find container \"11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420\": container with ID starting with 11def58570a0248f421942e04b82e383f8dd497c78d68b8cbda36ea072e0d420 not found: ID does not exist" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.056909 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.084504 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.105698 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.106166 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-central-agent" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106186 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-central-agent" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.106199 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="proxy-httpd" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106206 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="proxy-httpd" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.106216 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="sg-core" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106224 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="sg-core" Sep 29 14:01:38 crc kubenswrapper[4869]: E0929 14:01:38.106257 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-notification-agent" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106267 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-notification-agent" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106452 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-notification-agent" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106479 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="proxy-httpd" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106493 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="ceilometer-central-agent" Sep 29 
14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.106504 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" containerName="sg-core" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.108355 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.115130 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.115386 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.131013 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.178855 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.178966 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.179007 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.179031 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.179049 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.179069 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zgsx\" (UniqueName: \"kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.179123 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.273210 4869 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="83e71a75-00ee-4764-83ce-1ca265589a29" path="/var/lib/kubelet/pods/83e71a75-00ee-4764-83ce-1ca265589a29/volumes" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285422 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285484 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285508 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285528 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zgsx\" (UniqueName: \"kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285665 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.285710 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.287915 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.292824 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.298709 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.309275 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.319416 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.319967 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zgsx\" (UniqueName: \"kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.334525 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data\") pod \"ceilometer-0\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.450366 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.504290 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74d588c6fd-2lr92" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:34202->10.217.0.164:9311: read: connection reset by peer" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.504563 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74d588c6fd-2lr92" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:34212->10.217.0.164:9311: read: connection reset by peer" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.904848 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerStarted","Data":"e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.911706 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"95555888-1c52-4e91-ac6c-b85c38094784","Type":"ContainerStarted","Data":"e26b985643b3303140454047b60dd8c5329e22034a21fb7a1f23b0ef4ba6a5d7"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.913000 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.921637 4869 generic.go:334] "Generic (PLEG): container finished" podID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerID="6ec1366bdd397162419d2bc9765d94183846b57299d355daecf0ab34b8fb51dc" exitCode=0 Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.921713 4869 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerDied","Data":"6ec1366bdd397162419d2bc9765d94183846b57299d355daecf0ab34b8fb51dc"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.936959 4869 generic.go:334] "Generic (PLEG): container finished" podID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerID="91f6eb5c8a4a17f330260e4584c9c7e0690ef137a9c4b1b6cf593fa8bd4c28e6" exitCode=0 Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.937055 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" event={"ID":"8e4c81be-4e2d-4ae7-957c-bc6f257305e7","Type":"ContainerDied","Data":"91f6eb5c8a4a17f330260e4584c9c7e0690ef137a9c4b1b6cf593fa8bd4c28e6"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.949638 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerStarted","Data":"7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.949679 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerStarted","Data":"2a953683bb2bef1805b01a2cd2cc5c4036459ae72d1379e523ad304e530d9a0b"} Sep 29 14:01:38 crc kubenswrapper[4869]: I0929 14:01:38.970174 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.970156049 podStartE2EDuration="3.970156049s" podCreationTimestamp="2025-09-29 14:01:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:38.962176491 +0000 UTC m=+1225.402820811" watchObservedRunningTime="2025-09-29 14:01:38.970156049 +0000 UTC m=+1225.410800369" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.212929 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.326296 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.396761 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-984cbc747-hdvk6"] Sep 29 14:01:39 crc kubenswrapper[4869]: E0929 14:01:39.397292 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.397305 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api" Sep 29 14:01:39 crc kubenswrapper[4869]: E0929 14:01:39.397314 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api-log" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.397321 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api-log" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.397540 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api-log" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.397565 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" containerName="barbican-api" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.398698 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.403016 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.403384 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.416307 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-984cbc747-hdvk6"] Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.430356 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data\") pod \"cf6d273a-a151-4bfc-af19-0012e08ec38d\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.430428 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle\") pod \"cf6d273a-a151-4bfc-af19-0012e08ec38d\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.430498 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom\") pod \"cf6d273a-a151-4bfc-af19-0012e08ec38d\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.430557 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs\") pod \"cf6d273a-a151-4bfc-af19-0012e08ec38d\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 
14:01:39.430578 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkf5z\" (UniqueName: \"kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z\") pod \"cf6d273a-a151-4bfc-af19-0012e08ec38d\" (UID: \"cf6d273a-a151-4bfc-af19-0012e08ec38d\") " Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.431760 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs" (OuterVolumeSpecName: "logs") pod "cf6d273a-a151-4bfc-af19-0012e08ec38d" (UID: "cf6d273a-a151-4bfc-af19-0012e08ec38d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.432269 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf6d273a-a151-4bfc-af19-0012e08ec38d-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.436044 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cf6d273a-a151-4bfc-af19-0012e08ec38d" (UID: "cf6d273a-a151-4bfc-af19-0012e08ec38d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.436190 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z" (OuterVolumeSpecName: "kube-api-access-qkf5z") pod "cf6d273a-a151-4bfc-af19-0012e08ec38d" (UID: "cf6d273a-a151-4bfc-af19-0012e08ec38d"). InnerVolumeSpecName "kube-api-access-qkf5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.464747 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf6d273a-a151-4bfc-af19-0012e08ec38d" (UID: "cf6d273a-a151-4bfc-af19-0012e08ec38d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.502901 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data" (OuterVolumeSpecName: "config-data") pod "cf6d273a-a151-4bfc-af19-0012e08ec38d" (UID: "cf6d273a-a151-4bfc-af19-0012e08ec38d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534171 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-ovndb-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534232 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-httpd-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534300 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-internal-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534352 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534401 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-combined-ca-bundle\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534456 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-public-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534533 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw964\" (UniqueName: \"kubernetes.io/projected/01428588-d0f3-4d76-b537-2daec9cefe31-kube-api-access-qw964\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534689 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkf5z\" (UniqueName: \"kubernetes.io/projected/cf6d273a-a151-4bfc-af19-0012e08ec38d-kube-api-access-qkf5z\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534715 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534728 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.534740 4869 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cf6d273a-a151-4bfc-af19-0012e08ec38d-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.636860 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-ovndb-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637144 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-httpd-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637207 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-internal-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637232 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637256 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-combined-ca-bundle\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637314 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-public-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.637378 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw964\" (UniqueName: \"kubernetes.io/projected/01428588-d0f3-4d76-b537-2daec9cefe31-kube-api-access-qw964\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.643477 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.643736 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-combined-ca-bundle\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.644179 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-internal-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.644683 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-httpd-config\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.648097 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-public-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.674461 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/01428588-d0f3-4d76-b537-2daec9cefe31-ovndb-tls-certs\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.678334 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw964\" (UniqueName: \"kubernetes.io/projected/01428588-d0f3-4d76-b537-2daec9cefe31-kube-api-access-qw964\") pod \"neutron-984cbc747-hdvk6\" (UID: \"01428588-d0f3-4d76-b537-2daec9cefe31\") " pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.719503 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.968007 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" event={"ID":"8e4c81be-4e2d-4ae7-957c-bc6f257305e7","Type":"ContainerStarted","Data":"491940812e1be68d478139536f9e56d453d0bbaa55b31c7de0821a82a3941add"} Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.968453 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.983294 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerStarted","Data":"7ffb4210b69afe8718ce5ce3fb257363dc978f160ff09dc75edb17969f1543a8"} Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.983917 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.988575 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74d588c6fd-2lr92" event={"ID":"cf6d273a-a151-4bfc-af19-0012e08ec38d","Type":"ContainerDied","Data":"5ceec85d139405299472a60150701c5e3780e508d10f1c6b5d2796b03ad8a18b"} Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.988631 4869 scope.go:117] "RemoveContainer" containerID="6ec1366bdd397162419d2bc9765d94183846b57299d355daecf0ab34b8fb51dc" Sep 29 14:01:39 crc kubenswrapper[4869]: I0929 14:01:39.988727 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74d588c6fd-2lr92" Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.006676 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" podStartSLOduration=4.006659057 podStartE2EDuration="4.006659057s" podCreationTimestamp="2025-09-29 14:01:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:40.000471797 +0000 UTC m=+1226.441116127" watchObservedRunningTime="2025-09-29 14:01:40.006659057 +0000 UTC m=+1226.447303377" Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.009498 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerStarted","Data":"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261"} Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.009529 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerStarted","Data":"005303ae954094a1872f5757eb18c4949e6269fec32bbbca8cc3fbaa4a5d8bca"} Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.072857 4869 scope.go:117] "RemoveContainer" containerID="7af938c4f2a8968460970d78f7b7003bda4558e9f37d9e0f51740a673ee10595" Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.073365 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7bd86d5cfd-4t4bw" podStartSLOduration=4.073355792 podStartE2EDuration="4.073355792s" podCreationTimestamp="2025-09-29 14:01:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:40.072250313 +0000 UTC m=+1226.512894633" 
watchObservedRunningTime="2025-09-29 14:01:40.073355792 +0000 UTC m=+1226.514000112" Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.136375 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.170048 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-74d588c6fd-2lr92"] Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.293083 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf6d273a-a151-4bfc-af19-0012e08ec38d" path="/var/lib/kubelet/pods/cf6d273a-a151-4bfc-af19-0012e08ec38d/volumes" Sep 29 14:01:40 crc kubenswrapper[4869]: W0929 14:01:40.337667 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01428588_d0f3_4d76_b537_2daec9cefe31.slice/crio-b2c8e3aec9e69c708e0d993894f1c563082047188e904053d348b34e03634eeb WatchSource:0}: Error finding container b2c8e3aec9e69c708e0d993894f1c563082047188e904053d348b34e03634eeb: Status 404 returned error can't find the container with id b2c8e3aec9e69c708e0d993894f1c563082047188e904053d348b34e03634eeb Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.338165 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-984cbc747-hdvk6"] Sep 29 14:01:40 crc kubenswrapper[4869]: I0929 14:01:40.958326 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.020553 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-984cbc747-hdvk6" event={"ID":"01428588-d0f3-4d76-b537-2daec9cefe31","Type":"ContainerStarted","Data":"a98c937edd1311c2e7c71aac79a4ff764ba0c5a9f2dd455089bf5f1b84f72108"} Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.020600 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-984cbc747-hdvk6" event={"ID":"01428588-d0f3-4d76-b537-2daec9cefe31","Type":"ContainerStarted","Data":"0364055b4b74d191cdb0d1cfacae27a093b41de896925200455829bc57215ade"} Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.020637 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-984cbc747-hdvk6" event={"ID":"01428588-d0f3-4d76-b537-2daec9cefe31","Type":"ContainerStarted","Data":"b2c8e3aec9e69c708e0d993894f1c563082047188e904053d348b34e03634eeb"} Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.022275 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.028670 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerStarted","Data":"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691"} Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.028899 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerStarted","Data":"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4"} Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.028917 4869 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.065143 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-984cbc747-hdvk6" 
podStartSLOduration=2.065122987 podStartE2EDuration="2.065122987s" podCreationTimestamp="2025-09-29 14:01:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:41.058572436 +0000 UTC m=+1227.499216756" watchObservedRunningTime="2025-09-29 14:01:41.065122987 +0000 UTC m=+1227.505767307" Sep 29 14:01:41 crc kubenswrapper[4869]: I0929 14:01:41.309151 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Sep 29 14:01:42 crc kubenswrapper[4869]: I0929 14:01:42.111622 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.055232 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerStarted","Data":"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430"} Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.055723 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.058421 4869 generic.go:334] "Generic (PLEG): container finished" podID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" containerID="e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193" exitCode=1 Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.058508 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerDied","Data":"e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193"} Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.058547 4869 scope.go:117] "RemoveContainer" containerID="856118de09e84cea6dbacafc8467b52022c3bc2ee7b187b1ad9cc21e8d95c937" Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.059866 4869 scope.go:117] "RemoveContainer" containerID="e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193" Sep 29 14:01:43 crc kubenswrapper[4869]: E0929 14:01:43.060172 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:43 crc kubenswrapper[4869]: I0929 14:01:43.083272 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.12779168 podStartE2EDuration="5.083246756s" podCreationTimestamp="2025-09-29 14:01:38 +0000 UTC" firstStartedPulling="2025-09-29 14:01:39.252048244 +0000 UTC m=+1225.692692564" lastFinishedPulling="2025-09-29 14:01:42.20750332 +0000 UTC m=+1228.648147640" observedRunningTime="2025-09-29 14:01:43.076149801 +0000 UTC m=+1229.516794121" watchObservedRunningTime="2025-09-29 14:01:43.083246756 +0000 UTC m=+1229.523891076" Sep 29 14:01:44 crc kubenswrapper[4869]: I0929 14:01:44.071470 4869 generic.go:334] "Generic (PLEG): container finished" podID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" containerID="c84fd0ff80087f86194e3d7952c169c32bbeb8f29e2fbefbaa287783c29b1a76" exitCode=0 Sep 29 14:01:44 crc kubenswrapper[4869]: I0929 14:01:44.071548 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-db-sync-nmzfx" event={"ID":"250a15d6-2b1f-4b59-9564-7c7240c9b84e","Type":"ContainerDied","Data":"c84fd0ff80087f86194e3d7952c169c32bbeb8f29e2fbefbaa287783c29b1a76"} Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.485247 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.665767 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.665943 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.665970 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.666021 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.666027 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.666161 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8zq4\" (UniqueName: \"kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.666208 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle\") pod \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\" (UID: \"250a15d6-2b1f-4b59-9564-7c7240c9b84e\") " Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.666773 4869 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/250a15d6-2b1f-4b59-9564-7c7240c9b84e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.688821 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4" (OuterVolumeSpecName: "kube-api-access-l8zq4") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "kube-api-access-l8zq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.694763 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.719856 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts" (OuterVolumeSpecName: "scripts") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.765005 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.768890 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8zq4\" (UniqueName: \"kubernetes.io/projected/250a15d6-2b1f-4b59-9564-7c7240c9b84e-kube-api-access-l8zq4\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.768911 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.768920 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.768929 4869 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.793589 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data" (OuterVolumeSpecName: "config-data") pod "250a15d6-2b1f-4b59-9564-7c7240c9b84e" (UID: "250a15d6-2b1f-4b59-9564-7c7240c9b84e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.871018 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/250a15d6-2b1f-4b59-9564-7c7240c9b84e-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.959118 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Sep 29 14:01:45 crc kubenswrapper[4869]: I0929 14:01:45.968645 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.096676 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-nmzfx" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.107011 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nmzfx" event={"ID":"250a15d6-2b1f-4b59-9564-7c7240c9b84e","Type":"ContainerDied","Data":"e22348f2c1ae2c5248fbbe81bf590f76cd7593196ea7bed1e4e14cad2e74562d"} Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.107077 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e22348f2c1ae2c5248fbbe81bf590f76cd7593196ea7bed1e4e14cad2e74562d" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.145178 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.309224 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.313600 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:01:46 crc kubenswrapper[4869]: E0929 14:01:46.314470 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" containerName="cinder-db-sync" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.314485 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" containerName="cinder-db-sync" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.314706 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" containerName="cinder-db-sync" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.315908 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.322792 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q2nz9" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.323012 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.323184 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.323777 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.357252 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.387930 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.475647 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.475983 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="dnsmasq-dns" containerID="cri-o://491940812e1be68d478139536f9e56d453d0bbaa55b31c7de0821a82a3941add" gracePeriod=10 Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.486944 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.494563 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.498813 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66xxl\" (UniqueName: \"kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.498979 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.499148 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.499297 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.499326 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.580782 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.582539 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.598772 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603018 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603151 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66xxl\" (UniqueName: \"kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603188 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603348 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603540 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.603601 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.605111 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id\") pod 
\"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.611118 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.610783 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.624798 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.626424 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.639320 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66xxl\" (UniqueName: \"kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl\") pod \"cinder-scheduler-0\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.645464 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.647818 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.651037 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.663064 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.679139 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.708301 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.708748 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.708795 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.708858 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5g4x\" (UniqueName: \"kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.708886 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.811952 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812050 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812081 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812103 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " 
pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812160 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xnvz\" (UniqueName: \"kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812182 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812231 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812280 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812310 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812338 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812391 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5g4x\" (UniqueName: \"kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.812419 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.813514 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.813597 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.814061 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.823287 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.832285 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5g4x\" (UniqueName: \"kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x\") pod \"dnsmasq-dns-f66b6f785-jzv4c\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914681 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xnvz\" (UniqueName: \"kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914724 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914781 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914810 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914938 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.914957 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 
14:01:46.914972 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.919499 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.919634 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.919885 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.921918 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.929835 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.932903 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xnvz\" (UniqueName: \"kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:46 crc kubenswrapper[4869]: I0929 14:01:46.934378 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data\") pod \"cinder-api-0\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " pod="openstack/cinder-api-0" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.096748 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.107294 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.114238 4869 generic.go:334] "Generic (PLEG): container finished" podID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerID="491940812e1be68d478139536f9e56d453d0bbaa55b31c7de0821a82a3941add" exitCode=0 Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.115146 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" event={"ID":"8e4c81be-4e2d-4ae7-957c-bc6f257305e7","Type":"ContainerDied","Data":"491940812e1be68d478139536f9e56d453d0bbaa55b31c7de0821a82a3941add"} Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.200444 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.212424 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.330247 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config\") pod \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.330418 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb\") pod \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.330450 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc\") pod \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.330506 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb\") pod \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.330538 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5spf\" (UniqueName: \"kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf\") pod \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\" (UID: \"8e4c81be-4e2d-4ae7-957c-bc6f257305e7\") " Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.345888 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf" (OuterVolumeSpecName: "kube-api-access-c5spf") pod "8e4c81be-4e2d-4ae7-957c-bc6f257305e7" (UID: "8e4c81be-4e2d-4ae7-957c-bc6f257305e7"). InnerVolumeSpecName "kube-api-access-c5spf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.370003 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.405303 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8e4c81be-4e2d-4ae7-957c-bc6f257305e7" (UID: "8e4c81be-4e2d-4ae7-957c-bc6f257305e7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.448171 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.448471 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.449250 4869 scope.go:117] "RemoveContainer" containerID="e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193" Sep 29 14:01:47 crc kubenswrapper[4869]: E0929 14:01:47.449455 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.450224 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.452162 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5spf\" (UniqueName: \"kubernetes.io/projected/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-kube-api-access-c5spf\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.473881 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8e4c81be-4e2d-4ae7-957c-bc6f257305e7" (UID: "8e4c81be-4e2d-4ae7-957c-bc6f257305e7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.502423 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config" (OuterVolumeSpecName: "config") pod "8e4c81be-4e2d-4ae7-957c-bc6f257305e7" (UID: "8e4c81be-4e2d-4ae7-957c-bc6f257305e7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.539539 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8e4c81be-4e2d-4ae7-957c-bc6f257305e7" (UID: "8e4c81be-4e2d-4ae7-957c-bc6f257305e7"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.555077 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.555112 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.555126 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e4c81be-4e2d-4ae7-957c-bc6f257305e7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.890856 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:47 crc kubenswrapper[4869]: I0929 14:01:47.933066 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.205844 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerStarted","Data":"97cedc85589cabbfb9bda9ead18a30f6d1829011a981d0ac1e9aeb1b880f1b0d"} Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.238554 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" event={"ID":"9f660561-4e72-4887-a736-ba1d7c48ba95","Type":"ContainerStarted","Data":"e4180c10400810e53e3b57c4efd404463e76364fe12704f970b354cdcf4b82c7"} Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.250568 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.269875 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" event={"ID":"8e4c81be-4e2d-4ae7-957c-bc6f257305e7","Type":"ContainerDied","Data":"1db5c66cbcc34c699ba17a6319128017d0ad7777d48bccdc6a5657c05e85856a"} Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.269990 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerStarted","Data":"3f0c2f82b12d2b42ccedc7c7c0015038298a2ec8addf91ce54b2284229b3fdac"} Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.270017 4869 scope.go:117] "RemoveContainer" containerID="491940812e1be68d478139536f9e56d453d0bbaa55b31c7de0821a82a3941add" Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.317831 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.336235 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58d5d7b545-z5bkh"] Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.337247 4869 scope.go:117] "RemoveContainer" containerID="91f6eb5c8a4a17f330260e4584c9c7e0690ef137a9c4b1b6cf593fa8bd4c28e6" Sep 29 14:01:48 crc kubenswrapper[4869]: I0929 14:01:48.712341 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:50 crc kubenswrapper[4869]: I0929 14:01:50.254054 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" path="/var/lib/kubelet/pods/8e4c81be-4e2d-4ae7-957c-bc6f257305e7/volumes" Sep 29 14:01:51 crc kubenswrapper[4869]: I0929 14:01:51.825524 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-58d5d7b545-z5bkh" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.168:5353: i/o timeout" Sep 29 14:01:52 crc kubenswrapper[4869]: I0929 14:01:52.467035 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5b99b5b7f7-z8lvm" Sep 29 14:01:52 crc kubenswrapper[4869]: I0929 14:01:52.610306 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:52 crc kubenswrapper[4869]: I0929 14:01:52.852794 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5cbb465496-fhs6h" Sep 29 14:01:53 crc kubenswrapper[4869]: I0929 14:01:53.348796 4869 generic.go:334] "Generic (PLEG): container finished" podID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerID="ae15503fdbbba09ac8036da2ebc5c3d959cfec382af40af75ac7790288ddda82" exitCode=0 Sep 29 14:01:53 crc kubenswrapper[4869]: I0929 14:01:53.348866 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" event={"ID":"9f660561-4e72-4887-a736-ba1d7c48ba95","Type":"ContainerDied","Data":"ae15503fdbbba09ac8036da2ebc5c3d959cfec382af40af75ac7790288ddda82"} Sep 29 14:01:53 crc kubenswrapper[4869]: I0929 14:01:53.357498 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerStarted","Data":"7ccbc43e343a590326da5e130378854cfd9eb2dc824169fc4c6aed4491d28681"} Sep 29 14:01:54 crc kubenswrapper[4869]: I0929 14:01:54.372391 4869 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerStarted","Data":"103bd3db004da711da2a6438d65b8ff04c86dbd29830163b8ebe45f17d10379e"} Sep 29 14:01:54 crc kubenswrapper[4869]: I0929 14:01:54.372587 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api-log" containerID="cri-o://7ccbc43e343a590326da5e130378854cfd9eb2dc824169fc4c6aed4491d28681" gracePeriod=30 Sep 29 14:01:54 crc kubenswrapper[4869]: I0929 14:01:54.372844 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Sep 29 14:01:54 crc kubenswrapper[4869]: I0929 14:01:54.373289 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api" containerID="cri-o://103bd3db004da711da2a6438d65b8ff04c86dbd29830163b8ebe45f17d10379e" gracePeriod=30 Sep 29 14:01:54 crc kubenswrapper[4869]: I0929 14:01:54.415817 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=8.415790274 podStartE2EDuration="8.415790274s" podCreationTimestamp="2025-09-29 14:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:54.40409479 +0000 UTC m=+1240.844739120" watchObservedRunningTime="2025-09-29 14:01:54.415790274 +0000 UTC m=+1240.856434594" Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.387555 4869 generic.go:334] "Generic (PLEG): container finished" podID="4c90fd7a-697e-4f45-b219-97b334325c97" containerID="103bd3db004da711da2a6438d65b8ff04c86dbd29830163b8ebe45f17d10379e" exitCode=0 Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.387592 4869 generic.go:334] "Generic (PLEG): container finished" podID="4c90fd7a-697e-4f45-b219-97b334325c97" containerID="7ccbc43e343a590326da5e130378854cfd9eb2dc824169fc4c6aed4491d28681" exitCode=143 Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.387656 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerDied","Data":"103bd3db004da711da2a6438d65b8ff04c86dbd29830163b8ebe45f17d10379e"} Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.387685 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerDied","Data":"7ccbc43e343a590326da5e130378854cfd9eb2dc824169fc4c6aed4491d28681"} Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.389330 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" event={"ID":"9f660561-4e72-4887-a736-ba1d7c48ba95","Type":"ContainerStarted","Data":"66d8b81db92b3b130128f4e90cf5e654e63f37a0c3163a15d52abc5c1358f253"} Sep 29 14:01:55 crc kubenswrapper[4869]: I0929 14:01:55.390667 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.239030 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" podStartSLOduration=10.239009543 podStartE2EDuration="10.239009543s" podCreationTimestamp="2025-09-29 14:01:46 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:01:55.412857307 +0000 UTC m=+1241.853501627" watchObservedRunningTime="2025-09-29 14:01:56.239009543 +0000 UTC m=+1242.679653863" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.254512 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Sep 29 14:01:56 crc kubenswrapper[4869]: E0929 14:01:56.254935 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="init" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.254957 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="init" Sep 29 14:01:56 crc kubenswrapper[4869]: E0929 14:01:56.254991 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="dnsmasq-dns" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.255001 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="dnsmasq-dns" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.255282 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e4c81be-4e2d-4ae7-957c-bc6f257305e7" containerName="dnsmasq-dns" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.256227 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.258380 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.259188 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.260519 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-v88gs" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.263970 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.334411 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config-secret\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.334508 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.334596 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49tn8\" (UniqueName: \"kubernetes.io/projected/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-kube-api-access-49tn8\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.334657 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.409097 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4c90fd7a-697e-4f45-b219-97b334325c97","Type":"ContainerDied","Data":"3f0c2f82b12d2b42ccedc7c7c0015038298a2ec8addf91ce54b2284229b3fdac"} Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.409143 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f0c2f82b12d2b42ccedc7c7c0015038298a2ec8addf91ce54b2284229b3fdac" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.435967 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.436027 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config-secret\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.436085 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.436157 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49tn8\" (UniqueName: \"kubernetes.io/projected/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-kube-api-access-49tn8\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.437004 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.440022 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-openstack-config-secret\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.445655 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.453288 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49tn8\" (UniqueName: 
\"kubernetes.io/projected/f3799b3a-38c7-44fc-8f60-1fd75e2dd751-kube-api-access-49tn8\") pod \"openstackclient\" (UID: \"f3799b3a-38c7-44fc-8f60-1fd75e2dd751\") " pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.532926 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.575580 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639269 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639436 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639493 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639515 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639596 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639640 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xnvz\" (UniqueName: \"kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639673 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs" (OuterVolumeSpecName: "logs") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639744 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data\") pod \"4c90fd7a-697e-4f45-b219-97b334325c97\" (UID: \"4c90fd7a-697e-4f45-b219-97b334325c97\") " Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.639975 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.640320 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c90fd7a-697e-4f45-b219-97b334325c97-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.640345 4869 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c90fd7a-697e-4f45-b219-97b334325c97-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.646733 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts" (OuterVolumeSpecName: "scripts") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.646924 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz" (OuterVolumeSpecName: "kube-api-access-9xnvz") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "kube-api-access-9xnvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.648516 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.671592 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.710148 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data" (OuterVolumeSpecName: "config-data") pod "4c90fd7a-697e-4f45-b219-97b334325c97" (UID: "4c90fd7a-697e-4f45-b219-97b334325c97"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.744987 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.745028 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.745042 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.745051 4869 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c90fd7a-697e-4f45-b219-97b334325c97-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:56 crc kubenswrapper[4869]: I0929 14:01:56.745061 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xnvz\" (UniqueName: \"kubernetes.io/projected/4c90fd7a-697e-4f45-b219-97b334325c97-kube-api-access-9xnvz\") on node \"crc\" DevicePath \"\"" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.093270 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Sep 29 14:01:57 crc kubenswrapper[4869]: W0929 14:01:57.103238 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3799b3a_38c7_44fc_8f60_1fd75e2dd751.slice/crio-1b3c74f7ac4ed5cdde1985eb722267879c5aa21cf6e72fc830c5de1cb86a020f WatchSource:0}: Error finding container 1b3c74f7ac4ed5cdde1985eb722267879c5aa21cf6e72fc830c5de1cb86a020f: Status 404 returned error can't find the container with id 1b3c74f7ac4ed5cdde1985eb722267879c5aa21cf6e72fc830c5de1cb86a020f Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.422440 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f3799b3a-38c7-44fc-8f60-1fd75e2dd751","Type":"ContainerStarted","Data":"1b3c74f7ac4ed5cdde1985eb722267879c5aa21cf6e72fc830c5de1cb86a020f"} Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.424989 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.425174 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerStarted","Data":"115f60120d05a8f3a492dfaa7db6ff4cbe431f655457e11c9cc351fa2453d660"} Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.551859 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.579333 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.586407 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:57 crc kubenswrapper[4869]: E0929 14:01:57.586995 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api-log" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.587017 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api-log" Sep 29 14:01:57 crc kubenswrapper[4869]: E0929 14:01:57.587039 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.587045 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.587239 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.587257 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" containerName="cinder-api-log" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.588408 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.597979 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.598893 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.599158 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.599335 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668283 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/627d82b1-f6ec-45d5-a3ca-c633f22f076d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668431 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668461 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-scripts\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668493 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668517 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/627d82b1-f6ec-45d5-a3ca-c633f22f076d-logs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668543 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gwlc\" (UniqueName: \"kubernetes.io/projected/627d82b1-f6ec-45d5-a3ca-c633f22f076d-kube-api-access-5gwlc\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668570 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668592 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data-custom\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.668670 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771176 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/627d82b1-f6ec-45d5-a3ca-c633f22f076d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771506 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771526 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-scripts\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771553 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771576 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/627d82b1-f6ec-45d5-a3ca-c633f22f076d-logs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771598 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gwlc\" (UniqueName: \"kubernetes.io/projected/627d82b1-f6ec-45d5-a3ca-c633f22f076d-kube-api-access-5gwlc\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771639 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771658 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data-custom\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771683 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.771803 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/627d82b1-f6ec-45d5-a3ca-c633f22f076d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.772041 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/627d82b1-f6ec-45d5-a3ca-c633f22f076d-logs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.779034 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.779158 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data-custom\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.779651 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-scripts\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.782525 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.783858 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.785328 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/627d82b1-f6ec-45d5-a3ca-c633f22f076d-config-data\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.794431 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gwlc\" (UniqueName: \"kubernetes.io/projected/627d82b1-f6ec-45d5-a3ca-c633f22f076d-kube-api-access-5gwlc\") pod \"cinder-api-0\" (UID: \"627d82b1-f6ec-45d5-a3ca-c633f22f076d\") " pod="openstack/cinder-api-0" Sep 29 14:01:57 crc kubenswrapper[4869]: I0929 14:01:57.908082 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Sep 29 14:01:58 crc kubenswrapper[4869]: I0929 14:01:58.259806 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c90fd7a-697e-4f45-b219-97b334325c97" path="/var/lib/kubelet/pods/4c90fd7a-697e-4f45-b219-97b334325c97/volumes" Sep 29 14:01:58 crc kubenswrapper[4869]: I0929 14:01:58.412824 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Sep 29 14:01:58 crc kubenswrapper[4869]: W0929 14:01:58.423358 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod627d82b1_f6ec_45d5_a3ca_c633f22f076d.slice/crio-75e51b8baab7e9943fc59107202955348061135f1fb4d8100d7bd87bff7aec0b WatchSource:0}: Error finding container 75e51b8baab7e9943fc59107202955348061135f1fb4d8100d7bd87bff7aec0b: Status 404 returned error can't find the container with id 75e51b8baab7e9943fc59107202955348061135f1fb4d8100d7bd87bff7aec0b Sep 29 14:01:58 crc kubenswrapper[4869]: I0929 14:01:58.443112 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerStarted","Data":"a7bee6265ff3bc8b634705916f244bb26fa2d952182cb3deaaebb4489d4605c5"} Sep 29 14:01:58 crc kubenswrapper[4869]: I0929 14:01:58.445793 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"627d82b1-f6ec-45d5-a3ca-c633f22f076d","Type":"ContainerStarted","Data":"75e51b8baab7e9943fc59107202955348061135f1fb4d8100d7bd87bff7aec0b"} Sep 29 14:01:59 crc kubenswrapper[4869]: I0929 14:01:59.242369 4869 scope.go:117] "RemoveContainer" containerID="e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193" Sep 29 14:01:59 crc kubenswrapper[4869]: E0929 14:01:59.242915 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(103100a6-0dbb-481c-ba0e-4e7a2e5c38f6)\"" pod="openstack/watcher-decision-engine-0" podUID="103100a6-0dbb-481c-ba0e-4e7a2e5c38f6" Sep 29 14:01:59 crc kubenswrapper[4869]: I0929 14:01:59.457354 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"627d82b1-f6ec-45d5-a3ca-c633f22f076d","Type":"ContainerStarted","Data":"2e6664ebba665e233b15d32dbc4f3d2f702c076b717452548941b9e09b299bd2"} Sep 29 14:02:01 crc kubenswrapper[4869]: I0929 14:02:01.680276 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Sep 29 14:02:02 crc kubenswrapper[4869]: I0929 14:02:02.098868 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:02:02 crc kubenswrapper[4869]: I0929 14:02:02.120617 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.181802415 podStartE2EDuration="16.120594884s" podCreationTimestamp="2025-09-29 14:01:46 +0000 UTC" firstStartedPulling="2025-09-29 14:01:47.45685036 +0000 UTC m=+1233.897494680" lastFinishedPulling="2025-09-29 14:01:56.395642829 +0000 UTC m=+1242.836287149" observedRunningTime="2025-09-29 14:01:58.469895358 +0000 UTC m=+1244.910539678" watchObservedRunningTime="2025-09-29 14:02:02.120594884 +0000 UTC m=+1248.561239204" Sep 29 14:02:02 crc kubenswrapper[4869]: I0929 14:02:02.173630 
4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:02:02 crc kubenswrapper[4869]: I0929 14:02:02.173906 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="dnsmasq-dns" containerID="cri-o://596ccfc878f588b6fb914a509f2f165b75e883a3fe8b4898dead6c0e29b686c3" gracePeriod=10 Sep 29 14:02:02 crc kubenswrapper[4869]: I0929 14:02:02.488024 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"627d82b1-f6ec-45d5-a3ca-c633f22f076d","Type":"ContainerStarted","Data":"3336c8e55bd343daa1df35682f8292addc632b074d10c090a4efe3ac8151e7c1"} Sep 29 14:02:03 crc kubenswrapper[4869]: I0929 14:02:03.502937 4869 generic.go:334] "Generic (PLEG): container finished" podID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerID="596ccfc878f588b6fb914a509f2f165b75e883a3fe8b4898dead6c0e29b686c3" exitCode=0 Sep 29 14:02:03 crc kubenswrapper[4869]: I0929 14:02:03.503015 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" event={"ID":"c026feba-a18f-4ced-9f53-a93aa8bca990","Type":"ContainerDied","Data":"596ccfc878f588b6fb914a509f2f165b75e883a3fe8b4898dead6c0e29b686c3"} Sep 29 14:02:03 crc kubenswrapper[4869]: I0929 14:02:03.503431 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Sep 29 14:02:03 crc kubenswrapper[4869]: I0929 14:02:03.527349 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.527323686 podStartE2EDuration="6.527323686s" podCreationTimestamp="2025-09-29 14:01:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:02:03.524938664 +0000 UTC m=+1249.965582984" watchObservedRunningTime="2025-09-29 14:02:03.527323686 +0000 UTC m=+1249.967968006" Sep 29 14:02:06 crc kubenswrapper[4869]: I0929 14:02:06.837004 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Sep 29 14:02:06 crc kubenswrapper[4869]: I0929 14:02:06.902770 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.009117 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.457708 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.457855 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.459881 4869 scope.go:117] "RemoveContainer" containerID="e91301f6e1e4263795b2bf9ee37c82f8ba38232b443c009f3f84db5aec3f0193" Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.555709 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="cinder-scheduler" containerID="cri-o://115f60120d05a8f3a492dfaa7db6ff4cbe431f655457e11c9cc351fa2453d660" gracePeriod=30 Sep 29 14:02:07 crc kubenswrapper[4869]: I0929 14:02:07.556064 4869 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/cinder-scheduler-0" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="probe" containerID="cri-o://a7bee6265ff3bc8b634705916f244bb26fa2d952182cb3deaaebb4489d4605c5" gracePeriod=30 Sep 29 14:02:08 crc kubenswrapper[4869]: I0929 14:02:08.457329 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.221953 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.349156 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgw48\" (UniqueName: \"kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48\") pod \"c026feba-a18f-4ced-9f53-a93aa8bca990\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.349515 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config\") pod \"c026feba-a18f-4ced-9f53-a93aa8bca990\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.349588 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc\") pod \"c026feba-a18f-4ced-9f53-a93aa8bca990\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.349769 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb\") pod \"c026feba-a18f-4ced-9f53-a93aa8bca990\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.349930 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb\") pod \"c026feba-a18f-4ced-9f53-a93aa8bca990\" (UID: \"c026feba-a18f-4ced-9f53-a93aa8bca990\") " Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.358388 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48" (OuterVolumeSpecName: "kube-api-access-tgw48") pod "c026feba-a18f-4ced-9f53-a93aa8bca990" (UID: "c026feba-a18f-4ced-9f53-a93aa8bca990"). InnerVolumeSpecName "kube-api-access-tgw48". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.420806 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c026feba-a18f-4ced-9f53-a93aa8bca990" (UID: "c026feba-a18f-4ced-9f53-a93aa8bca990"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.421178 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c026feba-a18f-4ced-9f53-a93aa8bca990" (UID: "c026feba-a18f-4ced-9f53-a93aa8bca990"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.422603 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config" (OuterVolumeSpecName: "config") pod "c026feba-a18f-4ced-9f53-a93aa8bca990" (UID: "c026feba-a18f-4ced-9f53-a93aa8bca990"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.447413 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c026feba-a18f-4ced-9f53-a93aa8bca990" (UID: "c026feba-a18f-4ced-9f53-a93aa8bca990"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.452516 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.452561 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgw48\" (UniqueName: \"kubernetes.io/projected/c026feba-a18f-4ced-9f53-a93aa8bca990-kube-api-access-tgw48\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.452578 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.452589 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.452601 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c026feba-a18f-4ced-9f53-a93aa8bca990-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.583022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"103100a6-0dbb-481c-ba0e-4e7a2e5c38f6","Type":"ContainerStarted","Data":"583761aa27314ec4d6c4abee49ef91850dca18294209973f2410bc3c5622fbd2"} Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.592077 4869 generic.go:334] "Generic (PLEG): container finished" podID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerID="a7bee6265ff3bc8b634705916f244bb26fa2d952182cb3deaaebb4489d4605c5" exitCode=0 Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.592162 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerDied","Data":"a7bee6265ff3bc8b634705916f244bb26fa2d952182cb3deaaebb4489d4605c5"} Sep 29 14:02:09 crc 
kubenswrapper[4869]: I0929 14:02:09.596746 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" event={"ID":"c026feba-a18f-4ced-9f53-a93aa8bca990","Type":"ContainerDied","Data":"0a514b57d6fcbc5bdd23db749866ff562917afd3095ac26c9c6c0d19fe92fe4a"} Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.596970 4869 scope.go:117] "RemoveContainer" containerID="596ccfc878f588b6fb914a509f2f165b75e883a3fe8b4898dead6c0e29b686c3" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.597377 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.662878 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.670452 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86dbb9557f-sjgn6"] Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.677565 4869 scope.go:117] "RemoveContainer" containerID="4d2946f21ee5551fcbb04e6d11cc6429f5a777e36baa2592cbb21eb77d602c3c" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.732172 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-984cbc747-hdvk6" Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.802468 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.803202 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7bd86d5cfd-4t4bw" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-api" containerID="cri-o://7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14" gracePeriod=30 Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.803371 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7bd86d5cfd-4t4bw" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-httpd" containerID="cri-o://7ffb4210b69afe8718ce5ce3fb257363dc978f160ff09dc75edb17969f1543a8" gracePeriod=30 Sep 29 14:02:09 crc kubenswrapper[4869]: I0929 14:02:09.972159 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.010082 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.010375 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-central-agent" containerID="cri-o://cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261" gracePeriod=30 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.010574 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-notification-agent" containerID="cri-o://1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4" gracePeriod=30 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.010574 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="sg-core" 
containerID="cri-o://f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691" gracePeriod=30 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.010583 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="proxy-httpd" containerID="cri-o://3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430" gracePeriod=30 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.258681 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" path="/var/lib/kubelet/pods/c026feba-a18f-4ced-9f53-a93aa8bca990/volumes" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.653947 4869 generic.go:334] "Generic (PLEG): container finished" podID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerID="115f60120d05a8f3a492dfaa7db6ff4cbe431f655457e11c9cc351fa2453d660" exitCode=0 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.654409 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerDied","Data":"115f60120d05a8f3a492dfaa7db6ff4cbe431f655457e11c9cc351fa2453d660"} Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.658661 4869 generic.go:334] "Generic (PLEG): container finished" podID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerID="3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430" exitCode=0 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.658695 4869 generic.go:334] "Generic (PLEG): container finished" podID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerID="f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691" exitCode=2 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.658838 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerDied","Data":"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430"} Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.658906 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerDied","Data":"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691"} Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.668277 4869 generic.go:334] "Generic (PLEG): container finished" podID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerID="7ffb4210b69afe8718ce5ce3fb257363dc978f160ff09dc75edb17969f1543a8" exitCode=0 Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.668402 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerDied","Data":"7ffb4210b69afe8718ce5ce3fb257363dc978f160ff09dc75edb17969f1543a8"} Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.711682 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f3799b3a-38c7-44fc-8f60-1fd75e2dd751","Type":"ContainerStarted","Data":"079989e1f8df5683c62f2b340d9153799697be407b95fbf97dab7f4e038c7227"} Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.763114 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.317446682 podStartE2EDuration="14.763094061s" podCreationTimestamp="2025-09-29 14:01:56 +0000 UTC" firstStartedPulling="2025-09-29 
14:01:57.106092354 +0000 UTC m=+1243.546736674" lastFinishedPulling="2025-09-29 14:02:09.551739733 +0000 UTC m=+1255.992384053" observedRunningTime="2025-09-29 14:02:10.754869907 +0000 UTC m=+1257.195514227" watchObservedRunningTime="2025-09-29 14:02:10.763094061 +0000 UTC m=+1257.203738381" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.807365 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898418 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898547 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898591 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898678 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898762 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66xxl\" (UniqueName: \"kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.898808 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id\") pod \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\" (UID: \"5a5310c6-8efb-4909-9c4b-b52c08dd2839\") " Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.899201 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.900480 4869 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a5310c6-8efb-4909-9c4b-b52c08dd2839-etc-machine-id\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.904911 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.907531 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts" (OuterVolumeSpecName: "scripts") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.919436 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl" (OuterVolumeSpecName: "kube-api-access-66xxl") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "kube-api-access-66xxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.954700 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:10 crc kubenswrapper[4869]: I0929 14:02:10.997069 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86dbb9557f-sjgn6" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: i/o timeout" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.002029 4869 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data-custom\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.002067 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.002079 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66xxl\" (UniqueName: \"kubernetes.io/projected/5a5310c6-8efb-4909-9c4b-b52c08dd2839-kube-api-access-66xxl\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.002092 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.014056 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data" (OuterVolumeSpecName: "config-data") pod "5a5310c6-8efb-4909-9c4b-b52c08dd2839" (UID: "5a5310c6-8efb-4909-9c4b-b52c08dd2839"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.104384 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a5310c6-8efb-4909-9c4b-b52c08dd2839-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.722288 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5a5310c6-8efb-4909-9c4b-b52c08dd2839","Type":"ContainerDied","Data":"97cedc85589cabbfb9bda9ead18a30f6d1829011a981d0ac1e9aeb1b880f1b0d"} Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.722334 4869 scope.go:117] "RemoveContainer" containerID="a7bee6265ff3bc8b634705916f244bb26fa2d952182cb3deaaebb4489d4605c5" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.722335 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.726117 4869 generic.go:334] "Generic (PLEG): container finished" podID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerID="cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261" exitCode=0 Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.726804 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerDied","Data":"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261"} Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.744755 4869 scope.go:117] "RemoveContainer" containerID="115f60120d05a8f3a492dfaa7db6ff4cbe431f655457e11c9cc351fa2453d660" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.753855 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.767274 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.778578 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:11 crc kubenswrapper[4869]: E0929 14:02:11.779128 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="probe" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779154 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="probe" Sep 29 14:02:11 crc kubenswrapper[4869]: E0929 14:02:11.779187 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="init" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779197 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="init" Sep 29 14:02:11 crc kubenswrapper[4869]: E0929 14:02:11.779236 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="dnsmasq-dns" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779246 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="dnsmasq-dns" Sep 29 14:02:11 crc kubenswrapper[4869]: E0929 14:02:11.779264 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="cinder-scheduler" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779274 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="cinder-scheduler" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779536 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="probe" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779580 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="c026feba-a18f-4ced-9f53-a93aa8bca990" containerName="dnsmasq-dns" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.779594 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" containerName="cinder-scheduler" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.781211 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.784203 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.790136 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.919567 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.919697 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-scripts\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.919722 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmzd9\" (UniqueName: \"kubernetes.io/projected/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-kube-api-access-qmzd9\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.919760 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.919940 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:11 crc kubenswrapper[4869]: I0929 14:02:11.920044 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021572 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021722 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-scripts\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021745 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-qmzd9\" (UniqueName: \"kubernetes.io/projected/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-kube-api-access-qmzd9\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021793 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021845 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.021870 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.022871 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.026010 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.026460 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-scripts\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.027574 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-config-data\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.028926 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.040638 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmzd9\" (UniqueName: \"kubernetes.io/projected/31cbeb64-7a21-4885-b8ad-9fa5b42508f9-kube-api-access-qmzd9\") pod \"cinder-scheduler-0\" (UID: \"31cbeb64-7a21-4885-b8ad-9fa5b42508f9\") " 
pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.103100 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.252096 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a5310c6-8efb-4909-9c4b-b52c08dd2839" path="/var/lib/kubelet/pods/5a5310c6-8efb-4909-9c4b-b52c08dd2839/volumes" Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.564386 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Sep 29 14:02:12 crc kubenswrapper[4869]: I0929 14:02:12.742837 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"31cbeb64-7a21-4885-b8ad-9fa5b42508f9","Type":"ContainerStarted","Data":"d94d321b12420becf803de4dbd9b597306de500d77b3467407f292f3fcd8c3a7"} Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.439174 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.567948 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568087 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568118 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568213 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zgsx\" (UniqueName: \"kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568241 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568336 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.568365 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml\") pod \"f55548dd-0cb5-4749-a08b-cba0212a56ae\" (UID: \"f55548dd-0cb5-4749-a08b-cba0212a56ae\") " Sep 29 
14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.569083 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.569144 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.575772 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts" (OuterVolumeSpecName: "scripts") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.575790 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx" (OuterVolumeSpecName: "kube-api-access-9zgsx") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "kube-api-access-9zgsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.596996 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.661970 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.670958 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zgsx\" (UniqueName: \"kubernetes.io/projected/f55548dd-0cb5-4749-a08b-cba0212a56ae-kube-api-access-9zgsx\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.670994 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.671009 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.671022 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.671034 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.671046 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f55548dd-0cb5-4749-a08b-cba0212a56ae-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.688910 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data" (OuterVolumeSpecName: "config-data") pod "f55548dd-0cb5-4749-a08b-cba0212a56ae" (UID: "f55548dd-0cb5-4749-a08b-cba0212a56ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.773996 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f55548dd-0cb5-4749-a08b-cba0212a56ae-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.793016 4869 generic.go:334] "Generic (PLEG): container finished" podID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerID="1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4" exitCode=0 Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.793080 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerDied","Data":"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4"} Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.793109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f55548dd-0cb5-4749-a08b-cba0212a56ae","Type":"ContainerDied","Data":"005303ae954094a1872f5757eb18c4949e6269fec32bbbca8cc3fbaa4a5d8bca"} Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.793124 4869 scope.go:117] "RemoveContainer" containerID="3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.793223 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.812229 4869 generic.go:334] "Generic (PLEG): container finished" podID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerID="7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14" exitCode=0 Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.812323 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerDied","Data":"7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14"} Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.823071 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"31cbeb64-7a21-4885-b8ad-9fa5b42508f9","Type":"ContainerStarted","Data":"6bfe41f7cd9566c98f4fae0d49994e51c1cab4c98fdf546a10e4858424b2aa3b"} Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.843589 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.866908 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.899452 4869 scope.go:117] "RemoveContainer" containerID="f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.920777 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:13 crc kubenswrapper[4869]: E0929 14:02:13.921660 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-central-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.921681 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-central-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: E0929 14:02:13.921710 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="sg-core" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.921716 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="sg-core" Sep 29 14:02:13 crc kubenswrapper[4869]: E0929 14:02:13.921740 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-notification-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.921747 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-notification-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: E0929 14:02:13.921780 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="proxy-httpd" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.921788 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="proxy-httpd" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.922170 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="sg-core" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.922186 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" 
containerName="ceilometer-notification-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.922200 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="ceilometer-central-agent" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.922218 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" containerName="proxy-httpd" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.927175 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.934876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.936461 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.958510 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:13 crc kubenswrapper[4869]: I0929 14:02:13.969104 4869 scope.go:117] "RemoveContainer" containerID="1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.014628 4869 scope.go:117] "RemoveContainer" containerID="cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261" Sep 29 14:02:14 crc kubenswrapper[4869]: E0929 14:02:14.018247 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf55548dd_0cb5_4749_a08b_cba0212a56ae.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod472c07ff_186c_458b_ad02_d616f6a6dfca.slice/crio-7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.045202 4869 scope.go:117] "RemoveContainer" containerID="3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430" Sep 29 14:02:14 crc kubenswrapper[4869]: E0929 14:02:14.046095 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430\": container with ID starting with 3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430 not found: ID does not exist" containerID="3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046139 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430"} err="failed to get container status \"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430\": rpc error: code = NotFound desc = could not find container \"3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430\": container with ID starting with 3f28ae1b6c8f1ef6c41245f25dad813caf0573e177d5ddea26ae158c9aa4b430 not found: ID does not exist" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046164 4869 scope.go:117] "RemoveContainer" containerID="f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691" Sep 29 14:02:14 crc kubenswrapper[4869]: E0929 14:02:14.046434 4869 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691\": container with ID starting with f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691 not found: ID does not exist" containerID="f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046465 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691"} err="failed to get container status \"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691\": rpc error: code = NotFound desc = could not find container \"f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691\": container with ID starting with f6c74c4046586c2f0db7d7f0a1c7c4c96a186d06be911c2c8c5275ffc2422691 not found: ID does not exist" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046496 4869 scope.go:117] "RemoveContainer" containerID="1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4" Sep 29 14:02:14 crc kubenswrapper[4869]: E0929 14:02:14.046705 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4\": container with ID starting with 1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4 not found: ID does not exist" containerID="1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046731 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4"} err="failed to get container status \"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4\": rpc error: code = NotFound desc = could not find container \"1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4\": container with ID starting with 1788641d02b83570d32f5671127492c9fefe6a35d0aca4bf90577dcf161464d4 not found: ID does not exist" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046745 4869 scope.go:117] "RemoveContainer" containerID="cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261" Sep 29 14:02:14 crc kubenswrapper[4869]: E0929 14:02:14.046897 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261\": container with ID starting with cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261 not found: ID does not exist" containerID="cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.046914 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261"} err="failed to get container status \"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261\": rpc error: code = NotFound desc = could not find container \"cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261\": container with ID starting with cf05aa0491b8b028a0d4068caf0c21c1935742017ba25d581c7ab29719dff261 not found: ID does not exist" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110535 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110585 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqp7n\" (UniqueName: \"kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110675 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110722 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110765 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110901 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.110960 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213597 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213655 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213690 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " 
pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213782 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213810 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213857 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.213873 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqp7n\" (UniqueName: \"kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.214703 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.214892 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.216780 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.218020 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.228302 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.231652 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.235997 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.244368 
4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqp7n\" (UniqueName: \"kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.244517 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts\") pod \"ceilometer-0\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.268892 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.271547 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f55548dd-0cb5-4749-a08b-cba0212a56ae" path="/var/lib/kubelet/pods/f55548dd-0cb5-4749-a08b-cba0212a56ae/volumes" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.418900 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.521275 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config\") pod \"472c07ff-186c-458b-ad02-d616f6a6dfca\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.521364 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc6l9\" (UniqueName: \"kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9\") pod \"472c07ff-186c-458b-ad02-d616f6a6dfca\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.521458 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs\") pod \"472c07ff-186c-458b-ad02-d616f6a6dfca\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.521531 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle\") pod \"472c07ff-186c-458b-ad02-d616f6a6dfca\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.521557 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config\") pod \"472c07ff-186c-458b-ad02-d616f6a6dfca\" (UID: \"472c07ff-186c-458b-ad02-d616f6a6dfca\") " Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.527690 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "472c07ff-186c-458b-ad02-d616f6a6dfca" (UID: "472c07ff-186c-458b-ad02-d616f6a6dfca"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.536822 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9" (OuterVolumeSpecName: "kube-api-access-vc6l9") pod "472c07ff-186c-458b-ad02-d616f6a6dfca" (UID: "472c07ff-186c-458b-ad02-d616f6a6dfca"). InnerVolumeSpecName "kube-api-access-vc6l9". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.595959 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "472c07ff-186c-458b-ad02-d616f6a6dfca" (UID: "472c07ff-186c-458b-ad02-d616f6a6dfca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.615767 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "472c07ff-186c-458b-ad02-d616f6a6dfca" (UID: "472c07ff-186c-458b-ad02-d616f6a6dfca"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.616096 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config" (OuterVolumeSpecName: "config") pod "472c07ff-186c-458b-ad02-d616f6a6dfca" (UID: "472c07ff-186c-458b-ad02-d616f6a6dfca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.623568 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc6l9\" (UniqueName: \"kubernetes.io/projected/472c07ff-186c-458b-ad02-d616f6a6dfca-kube-api-access-vc6l9\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.623593 4869 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.623627 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.623639 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.623649 4869 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/472c07ff-186c-458b-ad02-d616f6a6dfca-httpd-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:14 crc kubenswrapper[4869]: W0929 14:02:14.781458 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5d10c10_0ba4_42ab_ab64_c1211ca8efa8.slice/crio-0e471e26759feb9d6371ec7a5e247254d4af1a1d66b11447a3a7a5bdacead639 WatchSource:0}: Error finding container 
0e471e26759feb9d6371ec7a5e247254d4af1a1d66b11447a3a7a5bdacead639: Status 404 returned error can't find the container with id 0e471e26759feb9d6371ec7a5e247254d4af1a1d66b11447a3a7a5bdacead639 Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.782481 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.834667 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"31cbeb64-7a21-4885-b8ad-9fa5b42508f9","Type":"ContainerStarted","Data":"7d08f1a5f0f60870f69300a6fcfb2c543b1e894c381ff9596a9f273e3c007ef7"} Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.841311 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bd86d5cfd-4t4bw" event={"ID":"472c07ff-186c-458b-ad02-d616f6a6dfca","Type":"ContainerDied","Data":"2a953683bb2bef1805b01a2cd2cc5c4036459ae72d1379e523ad304e530d9a0b"} Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.841323 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7bd86d5cfd-4t4bw" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.841401 4869 scope.go:117] "RemoveContainer" containerID="7ffb4210b69afe8718ce5ce3fb257363dc978f160ff09dc75edb17969f1543a8" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.843201 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerStarted","Data":"0e471e26759feb9d6371ec7a5e247254d4af1a1d66b11447a3a7a5bdacead639"} Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.869023 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.869005301 podStartE2EDuration="3.869005301s" podCreationTimestamp="2025-09-29 14:02:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:02:14.863946639 +0000 UTC m=+1261.304590979" watchObservedRunningTime="2025-09-29 14:02:14.869005301 +0000 UTC m=+1261.309649621" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.890163 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.892627 4869 scope.go:117] "RemoveContainer" containerID="7f1405eb99d2070fe8a5854d67c480badadc78612d569db616b4d9a2b4424b14" Sep 29 14:02:14 crc kubenswrapper[4869]: I0929 14:02:14.899542 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7bd86d5cfd-4t4bw"] Sep 29 14:02:15 crc kubenswrapper[4869]: I0929 14:02:15.855522 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerStarted","Data":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} Sep 29 14:02:15 crc kubenswrapper[4869]: I0929 14:02:15.856142 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerStarted","Data":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.266229 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" path="/var/lib/kubelet/pods/472c07ff-186c-458b-ad02-d616f6a6dfca/volumes" Sep 29 14:02:16 crc 
kubenswrapper[4869]: I0929 14:02:16.325556 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-c4pdp"] Sep 29 14:02:16 crc kubenswrapper[4869]: E0929 14:02:16.326047 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-httpd" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.326069 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-httpd" Sep 29 14:02:16 crc kubenswrapper[4869]: E0929 14:02:16.326122 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-api" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.326132 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-api" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.326360 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-api" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.326390 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="472c07ff-186c-458b-ad02-d616f6a6dfca" containerName="neutron-httpd" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.327238 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.355178 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-c4pdp"] Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.421591 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-95tdg"] Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.422893 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.445745 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-95tdg"] Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.463677 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgdwh\" (UniqueName: \"kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh\") pod \"nova-api-db-create-c4pdp\" (UID: \"8b222443-94e1-4451-9fdd-3240cba54a38\") " pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.531142 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-lttz8"] Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.532684 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.550425 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-lttz8"] Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.566931 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgdwh\" (UniqueName: \"kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh\") pod \"nova-api-db-create-c4pdp\" (UID: \"8b222443-94e1-4451-9fdd-3240cba54a38\") " pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.566994 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpjh8\" (UniqueName: \"kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8\") pod \"nova-cell0-db-create-95tdg\" (UID: \"27cd3399-68ec-4d81-8a5c-695980634d8c\") " pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.593370 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgdwh\" (UniqueName: \"kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh\") pod \"nova-api-db-create-c4pdp\" (UID: \"8b222443-94e1-4451-9fdd-3240cba54a38\") " pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.668233 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpjh8\" (UniqueName: \"kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8\") pod \"nova-cell0-db-create-95tdg\" (UID: \"27cd3399-68ec-4d81-8a5c-695980634d8c\") " pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.668582 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwqhr\" (UniqueName: \"kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr\") pod \"nova-cell1-db-create-lttz8\" (UID: \"8adecd5f-ad38-4d6c-a1d7-f382f5357c48\") " pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.668679 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.698868 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpjh8\" (UniqueName: \"kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8\") pod \"nova-cell0-db-create-95tdg\" (UID: \"27cd3399-68ec-4d81-8a5c-695980634d8c\") " pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.738294 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.770528 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwqhr\" (UniqueName: \"kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr\") pod \"nova-cell1-db-create-lttz8\" (UID: \"8adecd5f-ad38-4d6c-a1d7-f382f5357c48\") " pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.807466 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwqhr\" (UniqueName: \"kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr\") pod \"nova-cell1-db-create-lttz8\" (UID: \"8adecd5f-ad38-4d6c-a1d7-f382f5357c48\") " pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.867273 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:16 crc kubenswrapper[4869]: I0929 14:02:16.880203 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerStarted","Data":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.104046 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.223402 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-c4pdp"] Sep 29 14:02:17 crc kubenswrapper[4869]: W0929 14:02:17.241047 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b222443_94e1_4451_9fdd_3240cba54a38.slice/crio-d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4 WatchSource:0}: Error finding container d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4: Status 404 returned error can't find the container with id d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.346174 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-95tdg"] Sep 29 14:02:17 crc kubenswrapper[4869]: W0929 14:02:17.359855 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27cd3399_68ec_4d81_8a5c_695980634d8c.slice/crio-3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3 WatchSource:0}: Error finding container 3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3: Status 404 returned error can't find the container with id 3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.446344 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.449886 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-lttz8"] Sep 29 14:02:17 crc kubenswrapper[4869]: W0929 14:02:17.457733 4869 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8adecd5f_ad38_4d6c_a1d7_f382f5357c48.slice/crio-f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3 WatchSource:0}: Error finding container f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3: Status 404 returned error can't find the container with id f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.488160 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.891381 4869 generic.go:334] "Generic (PLEG): container finished" podID="8adecd5f-ad38-4d6c-a1d7-f382f5357c48" containerID="8a217821e33319e4e8f68d3771142dc639f890cfafa66e7e40b8f7264817b434" exitCode=0 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.891447 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lttz8" event={"ID":"8adecd5f-ad38-4d6c-a1d7-f382f5357c48","Type":"ContainerDied","Data":"8a217821e33319e4e8f68d3771142dc639f890cfafa66e7e40b8f7264817b434"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.891479 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lttz8" event={"ID":"8adecd5f-ad38-4d6c-a1d7-f382f5357c48","Type":"ContainerStarted","Data":"f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.893827 4869 generic.go:334] "Generic (PLEG): container finished" podID="27cd3399-68ec-4d81-8a5c-695980634d8c" containerID="b0d591311cd48a960363e8123c0559e78aaef7c766c29898e1144a1bf4d59f2b" exitCode=0 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.893880 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-95tdg" event={"ID":"27cd3399-68ec-4d81-8a5c-695980634d8c","Type":"ContainerDied","Data":"b0d591311cd48a960363e8123c0559e78aaef7c766c29898e1144a1bf4d59f2b"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.893900 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-95tdg" event={"ID":"27cd3399-68ec-4d81-8a5c-695980634d8c","Type":"ContainerStarted","Data":"3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.896451 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerStarted","Data":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.897087 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.898466 4869 generic.go:334] "Generic (PLEG): container finished" podID="8b222443-94e1-4451-9fdd-3240cba54a38" containerID="d38f66d7729ddafdd54b8caf3d46433c8fe07756d799abde3f03ccb03f489683" exitCode=0 Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.898562 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-c4pdp" event={"ID":"8b222443-94e1-4451-9fdd-3240cba54a38","Type":"ContainerDied","Data":"d38f66d7729ddafdd54b8caf3d46433c8fe07756d799abde3f03ccb03f489683"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.898624 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-c4pdp" 
event={"ID":"8b222443-94e1-4451-9fdd-3240cba54a38","Type":"ContainerStarted","Data":"d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4"} Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.898804 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.940994 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Sep 29 14:02:17 crc kubenswrapper[4869]: I0929 14:02:17.959724 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5808729230000003 podStartE2EDuration="4.959708027s" podCreationTimestamp="2025-09-29 14:02:13 +0000 UTC" firstStartedPulling="2025-09-29 14:02:14.784528183 +0000 UTC m=+1261.225172493" lastFinishedPulling="2025-09-29 14:02:17.163363277 +0000 UTC m=+1263.604007597" observedRunningTime="2025-09-29 14:02:17.955735843 +0000 UTC m=+1264.396380163" watchObservedRunningTime="2025-09-29 14:02:17.959708027 +0000 UTC m=+1264.400352347" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.255179 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.328458 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwqhr\" (UniqueName: \"kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr\") pod \"8adecd5f-ad38-4d6c-a1d7-f382f5357c48\" (UID: \"8adecd5f-ad38-4d6c-a1d7-f382f5357c48\") " Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.341132 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr" (OuterVolumeSpecName: "kube-api-access-gwqhr") pod "8adecd5f-ad38-4d6c-a1d7-f382f5357c48" (UID: "8adecd5f-ad38-4d6c-a1d7-f382f5357c48"). InnerVolumeSpecName "kube-api-access-gwqhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.440508 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwqhr\" (UniqueName: \"kubernetes.io/projected/8adecd5f-ad38-4d6c-a1d7-f382f5357c48-kube-api-access-gwqhr\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.532521 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.540277 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.653168 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgdwh\" (UniqueName: \"kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh\") pod \"8b222443-94e1-4451-9fdd-3240cba54a38\" (UID: \"8b222443-94e1-4451-9fdd-3240cba54a38\") " Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.653321 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpjh8\" (UniqueName: \"kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8\") pod \"27cd3399-68ec-4d81-8a5c-695980634d8c\" (UID: \"27cd3399-68ec-4d81-8a5c-695980634d8c\") " Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.660542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8" (OuterVolumeSpecName: "kube-api-access-rpjh8") pod "27cd3399-68ec-4d81-8a5c-695980634d8c" (UID: "27cd3399-68ec-4d81-8a5c-695980634d8c"). InnerVolumeSpecName "kube-api-access-rpjh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.663771 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh" (OuterVolumeSpecName: "kube-api-access-bgdwh") pod "8b222443-94e1-4451-9fdd-3240cba54a38" (UID: "8b222443-94e1-4451-9fdd-3240cba54a38"). InnerVolumeSpecName "kube-api-access-bgdwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.699936 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.755814 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgdwh\" (UniqueName: \"kubernetes.io/projected/8b222443-94e1-4451-9fdd-3240cba54a38-kube-api-access-bgdwh\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.755854 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpjh8\" (UniqueName: \"kubernetes.io/projected/27cd3399-68ec-4d81-8a5c-695980634d8c-kube-api-access-rpjh8\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.923400 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-c4pdp" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.923682 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-c4pdp" event={"ID":"8b222443-94e1-4451-9fdd-3240cba54a38","Type":"ContainerDied","Data":"d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4"} Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.923788 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d88b0ebdf81d2f263dea9c181091cffaa63dca964842773b1b5eb78d3b0b9ce4" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.925344 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-lttz8" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.925333 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lttz8" event={"ID":"8adecd5f-ad38-4d6c-a1d7-f382f5357c48","Type":"ContainerDied","Data":"f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3"} Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.925468 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1807f49ab53ddc3feb98423e499acc2c20ac76396f2736c32bf97652f75d3a3" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928082 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-95tdg" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928116 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-95tdg" event={"ID":"27cd3399-68ec-4d81-8a5c-695980634d8c","Type":"ContainerDied","Data":"3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3"} Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928149 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3025dc53326848010d2281b7afebde3e8316135ca071b5c1fb2a53bc010272e3" Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928527 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-central-agent" containerID="cri-o://9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" gracePeriod=30 Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928557 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="proxy-httpd" containerID="cri-o://59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" gracePeriod=30 Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928694 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-notification-agent" containerID="cri-o://5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" gracePeriod=30 Sep 29 14:02:19 crc kubenswrapper[4869]: I0929 14:02:19.928750 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="sg-core" containerID="cri-o://375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" gracePeriod=30 Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.871981 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939155 4869 generic.go:334] "Generic (PLEG): container finished" podID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" exitCode=0 Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939182 4869 generic.go:334] "Generic (PLEG): container finished" podID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" exitCode=2 Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939189 4869 generic.go:334] "Generic (PLEG): container finished" podID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" exitCode=0 Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939196 4869 generic.go:334] "Generic (PLEG): container finished" podID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" exitCode=0 Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939213 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerDied","Data":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939244 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerDied","Data":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939255 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerDied","Data":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939264 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerDied","Data":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939273 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8","Type":"ContainerDied","Data":"0e471e26759feb9d6371ec7a5e247254d4af1a1d66b11447a3a7a5bdacead639"} Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939289 4869 scope.go:117] "RemoveContainer" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.939293 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.961395 4869 scope.go:117] "RemoveContainer" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980475 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980561 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqp7n\" (UniqueName: \"kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980648 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980703 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980770 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980794 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.980828 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml\") pod \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\" (UID: \"f5d10c10-0ba4-42ab-ab64-c1211ca8efa8\") " Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.983860 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.984173 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.987033 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts" (OuterVolumeSpecName: "scripts") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.987095 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n" (OuterVolumeSpecName: "kube-api-access-gqp7n") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "kube-api-access-gqp7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:20 crc kubenswrapper[4869]: I0929 14:02:20.989071 4869 scope.go:117] "RemoveContainer" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.007150 4869 scope.go:117] "RemoveContainer" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.013505 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.029014 4869 scope.go:117] "RemoveContainer" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.029817 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": container with ID starting with 59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae not found: ID does not exist" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.029857 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} err="failed to get container status \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": rpc error: code = NotFound desc = could not find container \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": container with ID starting with 59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.029882 4869 scope.go:117] "RemoveContainer" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.030262 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": container with ID starting with 375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e not found: ID does not exist" 
containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.030293 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} err="failed to get container status \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": rpc error: code = NotFound desc = could not find container \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": container with ID starting with 375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.030309 4869 scope.go:117] "RemoveContainer" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.030665 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": container with ID starting with 5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab not found: ID does not exist" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.030714 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} err="failed to get container status \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": rpc error: code = NotFound desc = could not find container \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": container with ID starting with 5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.030755 4869 scope.go:117] "RemoveContainer" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.031048 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": container with ID starting with 9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c not found: ID does not exist" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031080 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} err="failed to get container status \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": rpc error: code = NotFound desc = could not find container \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": container with ID starting with 9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031099 4869 scope.go:117] "RemoveContainer" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031477 4869 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} err="failed to get container status \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": rpc error: code = NotFound desc = could not find container \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": container with ID starting with 59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031511 4869 scope.go:117] "RemoveContainer" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031810 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} err="failed to get container status \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": rpc error: code = NotFound desc = could not find container \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": container with ID starting with 375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.031841 4869 scope.go:117] "RemoveContainer" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032062 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} err="failed to get container status \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": rpc error: code = NotFound desc = could not find container \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": container with ID starting with 5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032085 4869 scope.go:117] "RemoveContainer" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032301 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} err="failed to get container status \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": rpc error: code = NotFound desc = could not find container \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": container with ID starting with 9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032325 4869 scope.go:117] "RemoveContainer" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032554 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} err="failed to get container status \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": rpc error: code = NotFound desc = could not find container \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": container with ID starting with 59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae not found: ID does not exist" Sep 
29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032577 4869 scope.go:117] "RemoveContainer" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032853 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} err="failed to get container status \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": rpc error: code = NotFound desc = could not find container \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": container with ID starting with 375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.032874 4869 scope.go:117] "RemoveContainer" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.033542 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} err="failed to get container status \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": rpc error: code = NotFound desc = could not find container \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": container with ID starting with 5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.033574 4869 scope.go:117] "RemoveContainer" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.033923 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} err="failed to get container status \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": rpc error: code = NotFound desc = could not find container \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": container with ID starting with 9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.033949 4869 scope.go:117] "RemoveContainer" containerID="59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034227 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae"} err="failed to get container status \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": rpc error: code = NotFound desc = could not find container \"59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae\": container with ID starting with 59e19a053c0a657dd74b1edeebb79cf5d4d46dfe657d16af617f4bb3085628ae not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034256 4869 scope.go:117] "RemoveContainer" containerID="375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034510 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e"} err="failed to get container status 
\"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": rpc error: code = NotFound desc = could not find container \"375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e\": container with ID starting with 375c47f3747d0496c218163447077e7cd7e23b1bd72c539531ee5572f84b676e not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034533 4869 scope.go:117] "RemoveContainer" containerID="5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034801 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab"} err="failed to get container status \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": rpc error: code = NotFound desc = could not find container \"5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab\": container with ID starting with 5233a9546c7367439df4fbea5f5a5d8199b8d9e6a45936223b7b85e601b1beab not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.034824 4869 scope.go:117] "RemoveContainer" containerID="9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.035031 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c"} err="failed to get container status \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": rpc error: code = NotFound desc = could not find container \"9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c\": container with ID starting with 9f383e6bb3dbadab6b2d6a66755151b61864f86a0cf6df70fd6a11992283ae5c not found: ID does not exist" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.066559 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.079939 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data" (OuterVolumeSpecName: "config-data") pod "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" (UID: "f5d10c10-0ba4-42ab-ab64-c1211ca8efa8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090119 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090155 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090185 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqp7n\" (UniqueName: \"kubernetes.io/projected/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-kube-api-access-gqp7n\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090197 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090206 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090214 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.090222 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.268207 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.289477 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.309691 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310140 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="proxy-httpd" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310156 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="proxy-httpd" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310172 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-central-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310178 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-central-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310199 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="sg-core" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310204 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="sg-core" Sep 29 14:02:21 crc 
kubenswrapper[4869]: E0929 14:02:21.310212 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cd3399-68ec-4d81-8a5c-695980634d8c" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310218 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cd3399-68ec-4d81-8a5c-695980634d8c" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310225 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8adecd5f-ad38-4d6c-a1d7-f382f5357c48" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310231 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8adecd5f-ad38-4d6c-a1d7-f382f5357c48" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310241 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b222443-94e1-4451-9fdd-3240cba54a38" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310248 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b222443-94e1-4451-9fdd-3240cba54a38" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: E0929 14:02:21.310259 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-notification-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310265 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-notification-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310422 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="sg-core" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310435 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-notification-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310452 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8adecd5f-ad38-4d6c-a1d7-f382f5357c48" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310465 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="proxy-httpd" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310476 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b222443-94e1-4451-9fdd-3240cba54a38" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310485 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cd3399-68ec-4d81-8a5c-695980634d8c" containerName="mariadb-database-create" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.310497 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" containerName="ceilometer-central-agent" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.322055 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.326280 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.326566 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.338909 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.394752 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.394874 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.394932 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.394985 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.395029 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.395055 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86b7c\" (UniqueName: \"kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.395095 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497053 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497167 4869 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497235 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497326 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497396 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497426 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86b7c\" (UniqueName: \"kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497467 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.497503 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.498209 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.506338 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.506921 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.513254 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.516654 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.521915 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86b7c\" (UniqueName: \"kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c\") pod \"ceilometer-0\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " pod="openstack/ceilometer-0" Sep 29 14:02:21 crc kubenswrapper[4869]: I0929 14:02:21.654557 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.089232 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.252083 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5d10c10-0ba4-42ab-ab64-c1211ca8efa8" path="/var/lib/kubelet/pods/f5d10c10-0ba4-42ab-ab64-c1211ca8efa8/volumes" Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.254114 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.960413 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerStarted","Data":"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8"} Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.960457 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerStarted","Data":"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d"} Sep 29 14:02:22 crc kubenswrapper[4869]: I0929 14:02:22.960470 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerStarted","Data":"9549f85f9b44ee3e4569b8435ca256f39fb43cbf6732b11706c605e691fae102"} Sep 29 14:02:23 crc kubenswrapper[4869]: I0929 14:02:23.972235 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerStarted","Data":"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62"} Sep 29 14:02:24 crc kubenswrapper[4869]: I0929 14:02:24.983642 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerStarted","Data":"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4"} Sep 29 14:02:24 crc kubenswrapper[4869]: I0929 14:02:24.983982 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:02:25 crc kubenswrapper[4869]: I0929 14:02:25.012504 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.733666259 
podStartE2EDuration="4.01248132s" podCreationTimestamp="2025-09-29 14:02:21 +0000 UTC" firstStartedPulling="2025-09-29 14:02:22.109115919 +0000 UTC m=+1268.549760239" lastFinishedPulling="2025-09-29 14:02:24.38793097 +0000 UTC m=+1270.828575300" observedRunningTime="2025-09-29 14:02:25.005043316 +0000 UTC m=+1271.445687636" watchObservedRunningTime="2025-09-29 14:02:25.01248132 +0000 UTC m=+1271.453125640" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.527573 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-13a5-account-create-hl72l"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.534168 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.536273 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.540210 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-13a5-account-create-hl72l"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.595394 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xdn2\" (UniqueName: \"kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2\") pod \"nova-api-13a5-account-create-hl72l\" (UID: \"701be68f-d223-469d-a64c-f813a6254027\") " pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.697322 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xdn2\" (UniqueName: \"kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2\") pod \"nova-api-13a5-account-create-hl72l\" (UID: \"701be68f-d223-469d-a64c-f813a6254027\") " pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.716946 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xdn2\" (UniqueName: \"kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2\") pod \"nova-api-13a5-account-create-hl72l\" (UID: \"701be68f-d223-469d-a64c-f813a6254027\") " pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.727257 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-150c-account-create-ftbhc"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.729311 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.734815 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-150c-account-create-ftbhc"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.735642 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.799553 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtt5h\" (UniqueName: \"kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h\") pod \"nova-cell0-150c-account-create-ftbhc\" (UID: \"4800f947-0c44-4799-bcd2-6fae29f29ff4\") " pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.823940 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-442e-account-create-5rm42"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.825170 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.828752 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.835200 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-442e-account-create-5rm42"] Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.852282 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.901671 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p2gw\" (UniqueName: \"kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw\") pod \"nova-cell1-442e-account-create-5rm42\" (UID: \"bf7ac506-bb7e-4b3b-949c-d12dfae9f756\") " pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.902076 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtt5h\" (UniqueName: \"kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h\") pod \"nova-cell0-150c-account-create-ftbhc\" (UID: \"4800f947-0c44-4799-bcd2-6fae29f29ff4\") " pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:26 crc kubenswrapper[4869]: I0929 14:02:26.932045 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtt5h\" (UniqueName: \"kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h\") pod \"nova-cell0-150c-account-create-ftbhc\" (UID: \"4800f947-0c44-4799-bcd2-6fae29f29ff4\") " pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.003788 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p2gw\" (UniqueName: \"kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw\") pod \"nova-cell1-442e-account-create-5rm42\" (UID: \"bf7ac506-bb7e-4b3b-949c-d12dfae9f756\") " pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.020127 4869 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-8p2gw\" (UniqueName: \"kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw\") pod \"nova-cell1-442e-account-create-5rm42\" (UID: \"bf7ac506-bb7e-4b3b-949c-d12dfae9f756\") " pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.081455 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.139461 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.325071 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-13a5-account-create-hl72l"] Sep 29 14:02:27 crc kubenswrapper[4869]: W0929 14:02:27.337214 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod701be68f_d223_469d_a64c_f813a6254027.slice/crio-45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0 WatchSource:0}: Error finding container 45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0: Status 404 returned error can't find the container with id 45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0 Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.537052 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-150c-account-create-ftbhc"] Sep 29 14:02:27 crc kubenswrapper[4869]: I0929 14:02:27.671203 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-442e-account-create-5rm42"] Sep 29 14:02:27 crc kubenswrapper[4869]: W0929 14:02:27.727328 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf7ac506_bb7e_4b3b_949c_d12dfae9f756.slice/crio-e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91 WatchSource:0}: Error finding container e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91: Status 404 returned error can't find the container with id e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91 Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.013395 4869 generic.go:334] "Generic (PLEG): container finished" podID="701be68f-d223-469d-a64c-f813a6254027" containerID="7dc4147479864fbc36fde583a4a05c67a66870df6920b06020cef6996e86f127" exitCode=0 Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.013473 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-13a5-account-create-hl72l" event={"ID":"701be68f-d223-469d-a64c-f813a6254027","Type":"ContainerDied","Data":"7dc4147479864fbc36fde583a4a05c67a66870df6920b06020cef6996e86f127"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.013498 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-13a5-account-create-hl72l" event={"ID":"701be68f-d223-469d-a64c-f813a6254027","Type":"ContainerStarted","Data":"45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.015971 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-442e-account-create-5rm42" event={"ID":"bf7ac506-bb7e-4b3b-949c-d12dfae9f756","Type":"ContainerStarted","Data":"5fe133537285bd1796148e21aaaefa911a982f50107d5f7868e4515c3fa6b8e6"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.016040 4869 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-442e-account-create-5rm42" event={"ID":"bf7ac506-bb7e-4b3b-949c-d12dfae9f756","Type":"ContainerStarted","Data":"e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.017333 4869 generic.go:334] "Generic (PLEG): container finished" podID="4800f947-0c44-4799-bcd2-6fae29f29ff4" containerID="3557ecc99a01dd817e3db7d621bf4160aa2abc0c90697f30bac6d211d95d1fbb" exitCode=0 Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.017379 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-150c-account-create-ftbhc" event={"ID":"4800f947-0c44-4799-bcd2-6fae29f29ff4","Type":"ContainerDied","Data":"3557ecc99a01dd817e3db7d621bf4160aa2abc0c90697f30bac6d211d95d1fbb"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.017406 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-150c-account-create-ftbhc" event={"ID":"4800f947-0c44-4799-bcd2-6fae29f29ff4","Type":"ContainerStarted","Data":"e5037313efd716c52a6307b8ce9e8b4f4720e06a7227ef339012e86810cbb36f"} Sep 29 14:02:28 crc kubenswrapper[4869]: I0929 14:02:28.056547 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-442e-account-create-5rm42" podStartSLOduration=2.056527791 podStartE2EDuration="2.056527791s" podCreationTimestamp="2025-09-29 14:02:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:02:28.05455238 +0000 UTC m=+1274.495196690" watchObservedRunningTime="2025-09-29 14:02:28.056527791 +0000 UTC m=+1274.497172111" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.029446 4869 generic.go:334] "Generic (PLEG): container finished" podID="bf7ac506-bb7e-4b3b-949c-d12dfae9f756" containerID="5fe133537285bd1796148e21aaaefa911a982f50107d5f7868e4515c3fa6b8e6" exitCode=0 Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.029502 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-442e-account-create-5rm42" event={"ID":"bf7ac506-bb7e-4b3b-949c-d12dfae9f756","Type":"ContainerDied","Data":"5fe133537285bd1796148e21aaaefa911a982f50107d5f7868e4515c3fa6b8e6"} Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.522996 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.529456 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.572385 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xdn2\" (UniqueName: \"kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2\") pod \"701be68f-d223-469d-a64c-f813a6254027\" (UID: \"701be68f-d223-469d-a64c-f813a6254027\") " Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.572472 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtt5h\" (UniqueName: \"kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h\") pod \"4800f947-0c44-4799-bcd2-6fae29f29ff4\" (UID: \"4800f947-0c44-4799-bcd2-6fae29f29ff4\") " Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.578829 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2" (OuterVolumeSpecName: "kube-api-access-2xdn2") pod "701be68f-d223-469d-a64c-f813a6254027" (UID: "701be68f-d223-469d-a64c-f813a6254027"). InnerVolumeSpecName "kube-api-access-2xdn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.580302 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h" (OuterVolumeSpecName: "kube-api-access-vtt5h") pod "4800f947-0c44-4799-bcd2-6fae29f29ff4" (UID: "4800f947-0c44-4799-bcd2-6fae29f29ff4"). InnerVolumeSpecName "kube-api-access-vtt5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.674346 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xdn2\" (UniqueName: \"kubernetes.io/projected/701be68f-d223-469d-a64c-f813a6254027-kube-api-access-2xdn2\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.674574 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtt5h\" (UniqueName: \"kubernetes.io/projected/4800f947-0c44-4799-bcd2-6fae29f29ff4-kube-api-access-vtt5h\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.719513 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.719813 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="sg-core" containerID="cri-o://1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" gracePeriod=30 Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.719835 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-central-agent" containerID="cri-o://4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" gracePeriod=30 Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.719816 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="proxy-httpd" containerID="cri-o://61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" gracePeriod=30 Sep 29 14:02:29 crc kubenswrapper[4869]: I0929 14:02:29.719906 
4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-notification-agent" containerID="cri-o://22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" gracePeriod=30 Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.039346 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-150c-account-create-ftbhc" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.039372 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-150c-account-create-ftbhc" event={"ID":"4800f947-0c44-4799-bcd2-6fae29f29ff4","Type":"ContainerDied","Data":"e5037313efd716c52a6307b8ce9e8b4f4720e06a7227ef339012e86810cbb36f"} Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.039431 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5037313efd716c52a6307b8ce9e8b4f4720e06a7227ef339012e86810cbb36f" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.041780 4869 generic.go:334] "Generic (PLEG): container finished" podID="49825785-8ef7-441b-a867-a4a7111db36e" containerID="61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" exitCode=0 Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.041804 4869 generic.go:334] "Generic (PLEG): container finished" podID="49825785-8ef7-441b-a867-a4a7111db36e" containerID="1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" exitCode=2 Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.041877 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerDied","Data":"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4"} Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.041896 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerDied","Data":"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62"} Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.043721 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-13a5-account-create-hl72l" event={"ID":"701be68f-d223-469d-a64c-f813a6254027","Type":"ContainerDied","Data":"45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0"} Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.043772 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45dc8e93a41a08b605cc41b5907ed84c7e82af8453292b291a1c62a19cecaad0" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.043745 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-13a5-account-create-hl72l" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.341575 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.389439 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p2gw\" (UniqueName: \"kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw\") pod \"bf7ac506-bb7e-4b3b-949c-d12dfae9f756\" (UID: \"bf7ac506-bb7e-4b3b-949c-d12dfae9f756\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.393918 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw" (OuterVolumeSpecName: "kube-api-access-8p2gw") pod "bf7ac506-bb7e-4b3b-949c-d12dfae9f756" (UID: "bf7ac506-bb7e-4b3b-949c-d12dfae9f756"). InnerVolumeSpecName "kube-api-access-8p2gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.492711 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p2gw\" (UniqueName: \"kubernetes.io/projected/bf7ac506-bb7e-4b3b-949c-d12dfae9f756-kube-api-access-8p2gw\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.655234 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.696403 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.696535 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.696749 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.696827 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.696909 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86b7c\" (UniqueName: \"kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.697032 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.697143 4869 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts\") pod \"49825785-8ef7-441b-a867-a4a7111db36e\" (UID: \"49825785-8ef7-441b-a867-a4a7111db36e\") " Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.698052 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.698104 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.698184 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.706908 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c" (OuterVolumeSpecName: "kube-api-access-86b7c") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "kube-api-access-86b7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.710142 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts" (OuterVolumeSpecName: "scripts") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.737567 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.784625 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.799701 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.799733 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86b7c\" (UniqueName: \"kubernetes.io/projected/49825785-8ef7-441b-a867-a4a7111db36e-kube-api-access-86b7c\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.799744 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.799753 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.799762 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49825785-8ef7-441b-a867-a4a7111db36e-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.826710 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data" (OuterVolumeSpecName: "config-data") pod "49825785-8ef7-441b-a867-a4a7111db36e" (UID: "49825785-8ef7-441b-a867-a4a7111db36e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:30 crc kubenswrapper[4869]: I0929 14:02:30.901313 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49825785-8ef7-441b-a867-a4a7111db36e-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.054901 4869 generic.go:334] "Generic (PLEG): container finished" podID="49825785-8ef7-441b-a867-a4a7111db36e" containerID="22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" exitCode=0 Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.054934 4869 generic.go:334] "Generic (PLEG): container finished" podID="49825785-8ef7-441b-a867-a4a7111db36e" containerID="4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" exitCode=0 Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.054967 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.054980 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerDied","Data":"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8"} Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.055009 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerDied","Data":"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d"} Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.055020 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49825785-8ef7-441b-a867-a4a7111db36e","Type":"ContainerDied","Data":"9549f85f9b44ee3e4569b8435ca256f39fb43cbf6732b11706c605e691fae102"} Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.055035 4869 scope.go:117] "RemoveContainer" containerID="61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.056981 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-442e-account-create-5rm42" event={"ID":"bf7ac506-bb7e-4b3b-949c-d12dfae9f756","Type":"ContainerDied","Data":"e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91"} Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.057007 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e835c14a869367a8f5148b6e706ec400fffa31072bef2e2d2007d91dcfd54c91" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.057053 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-442e-account-create-5rm42" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.094950 4869 scope.go:117] "RemoveContainer" containerID="1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.111019 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.124971 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.129199 4869 scope.go:117] "RemoveContainer" containerID="22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.138207 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.139964 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="proxy-httpd" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.140068 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="proxy-httpd" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.140147 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ac506-bb7e-4b3b-949c-d12dfae9f756" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.140253 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ac506-bb7e-4b3b-949c-d12dfae9f756" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.140347 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-central-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.140402 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-central-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.140464 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="sg-core" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.143722 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="sg-core" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.143816 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701be68f-d223-469d-a64c-f813a6254027" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.143830 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="701be68f-d223-469d-a64c-f813a6254027" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.143881 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-notification-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.143890 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-notification-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.143901 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4800f947-0c44-4799-bcd2-6fae29f29ff4" containerName="mariadb-account-create" Sep 29 14:02:31 crc 
kubenswrapper[4869]: I0929 14:02:31.143908 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4800f947-0c44-4799-bcd2-6fae29f29ff4" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144276 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-central-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144290 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf7ac506-bb7e-4b3b-949c-d12dfae9f756" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144300 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="ceilometer-notification-agent" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144312 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4800f947-0c44-4799-bcd2-6fae29f29ff4" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144338 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="701be68f-d223-469d-a64c-f813a6254027" containerName="mariadb-account-create" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144361 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="sg-core" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.144381 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="49825785-8ef7-441b-a867-a4a7111db36e" containerName="proxy-httpd" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.146419 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.149182 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.149526 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.150650 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.171184 4869 scope.go:117] "RemoveContainer" containerID="4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.204107 4869 scope.go:117] "RemoveContainer" containerID="61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.206602 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4\": container with ID starting with 61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4 not found: ID does not exist" containerID="61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.206667 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4"} err="failed to get container status \"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4\": rpc error: code = NotFound desc = could not find container 
\"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4\": container with ID starting with 61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.206699 4869 scope.go:117] "RemoveContainer" containerID="1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.207063 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62\": container with ID starting with 1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62 not found: ID does not exist" containerID="1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207088 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62"} err="failed to get container status \"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62\": rpc error: code = NotFound desc = could not find container \"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62\": container with ID starting with 1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207104 4869 scope.go:117] "RemoveContainer" containerID="22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.207404 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8\": container with ID starting with 22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8 not found: ID does not exist" containerID="22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207429 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8"} err="failed to get container status \"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8\": rpc error: code = NotFound desc = could not find container \"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8\": container with ID starting with 22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207443 4869 scope.go:117] "RemoveContainer" containerID="4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207737 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207778 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " 
pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: E0929 14:02:31.207755 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d\": container with ID starting with 4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d not found: ID does not exist" containerID="4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207808 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d"} err="failed to get container status \"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d\": rpc error: code = NotFound desc = could not find container \"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d\": container with ID starting with 4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207818 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207823 4869 scope.go:117] "RemoveContainer" containerID="61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207945 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srvph\" (UniqueName: \"kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.207993 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.208181 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.208460 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.208498 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4"} err="failed to get container status \"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4\": rpc error: code = NotFound desc = could not find container 
\"61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4\": container with ID starting with 61cc5131cb8082f8aa93949adcee19263e6cb02d80ee22d96bc5a4bcc59eb5f4 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.208533 4869 scope.go:117] "RemoveContainer" containerID="1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.209043 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62"} err="failed to get container status \"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62\": rpc error: code = NotFound desc = could not find container \"1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62\": container with ID starting with 1c3ceeba8686dd4dc5ad58f8f64b9f984977f6a90f61dbce00335ee6edd57a62 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.209065 4869 scope.go:117] "RemoveContainer" containerID="22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.209575 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8"} err="failed to get container status \"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8\": rpc error: code = NotFound desc = could not find container \"22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8\": container with ID starting with 22d8bb1701626f04b59994b85c275c7eb7726b96099c39d26f8a245fb024fbb8 not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.209599 4869 scope.go:117] "RemoveContainer" containerID="4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.210027 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d"} err="failed to get container status \"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d\": rpc error: code = NotFound desc = could not find container \"4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d\": container with ID starting with 4bbb5a7040c604619b603437c639ca3a063d3aa5f67094f3e5e91bf77e06af2d not found: ID does not exist" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310545 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310675 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310727 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " 
pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310749 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310791 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310838 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srvph\" (UniqueName: \"kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.310868 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.311295 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.311929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.315278 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.315322 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.318250 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.323752 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: 
I0929 14:02:31.331178 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srvph\" (UniqueName: \"kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph\") pod \"ceilometer-0\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.485668 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.880375 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.964604 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.992352 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9fqpp"] Sep 29 14:02:31 crc kubenswrapper[4869]: I0929 14:02:31.994783 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:31.999977 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.000280 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vkql4" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.011944 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9fqpp"] Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.012231 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.032147 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.032220 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.032276 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgv9c\" (UniqueName: \"kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.032325 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 
14:02:32.069367 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerStarted","Data":"6f324e9f2f270cc6ede5de3247bfa91ac132d20ba0959a48ff2d6fc1bba615f9"} Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.134128 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.134216 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.134275 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgv9c\" (UniqueName: \"kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.134338 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.138158 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.138437 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.140930 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.154452 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgv9c\" (UniqueName: \"kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c\") pod \"nova-cell0-conductor-db-sync-9fqpp\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.254334 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="49825785-8ef7-441b-a867-a4a7111db36e" path="/var/lib/kubelet/pods/49825785-8ef7-441b-a867-a4a7111db36e/volumes" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.319760 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:32 crc kubenswrapper[4869]: I0929 14:02:32.796954 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9fqpp"] Sep 29 14:02:33 crc kubenswrapper[4869]: I0929 14:02:33.085927 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" event={"ID":"74c97303-a69c-471b-8b23-4a72ec813beb","Type":"ContainerStarted","Data":"1be4d4b394a6f8e04de85379bb7bd863b8a19b23f4a74e98c8c96770bdd3a751"} Sep 29 14:02:33 crc kubenswrapper[4869]: I0929 14:02:33.088943 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerStarted","Data":"5e67c73719a3f3b147743a37936560ae5241b754438c6cfa9c6fabc75d18900f"} Sep 29 14:02:33 crc kubenswrapper[4869]: I0929 14:02:33.089073 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerStarted","Data":"7e56bcaf25ae92801dda9bb44a84ae810b05c23ec22aca2b2feac60eeb0b9c81"} Sep 29 14:02:35 crc kubenswrapper[4869]: I0929 14:02:35.112470 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerStarted","Data":"4cec03146a7c78b50689eff2fd4883aa26a9bef6af8ded896d900517ad397bab"} Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.146985 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerStarted","Data":"65b022aea4190cc24e902d8950ab1d9be415d5be9ad25e1662ac51f952edbbaa"} Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.147194 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-central-agent" containerID="cri-o://7e56bcaf25ae92801dda9bb44a84ae810b05c23ec22aca2b2feac60eeb0b9c81" gracePeriod=30 Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.147438 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.147727 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="proxy-httpd" containerID="cri-o://65b022aea4190cc24e902d8950ab1d9be415d5be9ad25e1662ac51f952edbbaa" gracePeriod=30 Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.147770 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="sg-core" containerID="cri-o://4cec03146a7c78b50689eff2fd4883aa26a9bef6af8ded896d900517ad397bab" gracePeriod=30 Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.147803 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-notification-agent" containerID="cri-o://5e67c73719a3f3b147743a37936560ae5241b754438c6cfa9c6fabc75d18900f" 
gracePeriod=30 Sep 29 14:02:36 crc kubenswrapper[4869]: I0929 14:02:36.181801 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.375780773 podStartE2EDuration="5.18178061s" podCreationTimestamp="2025-09-29 14:02:31 +0000 UTC" firstStartedPulling="2025-09-29 14:02:31.965077037 +0000 UTC m=+1278.405721357" lastFinishedPulling="2025-09-29 14:02:35.771076874 +0000 UTC m=+1282.211721194" observedRunningTime="2025-09-29 14:02:36.177037797 +0000 UTC m=+1282.617682117" watchObservedRunningTime="2025-09-29 14:02:36.18178061 +0000 UTC m=+1282.622424930" Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159100 4869 generic.go:334] "Generic (PLEG): container finished" podID="799a00e2-c4f4-4905-a130-e0b082840897" containerID="65b022aea4190cc24e902d8950ab1d9be415d5be9ad25e1662ac51f952edbbaa" exitCode=0 Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159424 4869 generic.go:334] "Generic (PLEG): container finished" podID="799a00e2-c4f4-4905-a130-e0b082840897" containerID="4cec03146a7c78b50689eff2fd4883aa26a9bef6af8ded896d900517ad397bab" exitCode=2 Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159438 4869 generic.go:334] "Generic (PLEG): container finished" podID="799a00e2-c4f4-4905-a130-e0b082840897" containerID="5e67c73719a3f3b147743a37936560ae5241b754438c6cfa9c6fabc75d18900f" exitCode=0 Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159462 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerDied","Data":"65b022aea4190cc24e902d8950ab1d9be415d5be9ad25e1662ac51f952edbbaa"} Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159495 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerDied","Data":"4cec03146a7c78b50689eff2fd4883aa26a9bef6af8ded896d900517ad397bab"} Sep 29 14:02:37 crc kubenswrapper[4869]: I0929 14:02:37.159510 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerDied","Data":"5e67c73719a3f3b147743a37936560ae5241b754438c6cfa9c6fabc75d18900f"} Sep 29 14:02:40 crc kubenswrapper[4869]: I0929 14:02:40.191693 4869 generic.go:334] "Generic (PLEG): container finished" podID="799a00e2-c4f4-4905-a130-e0b082840897" containerID="7e56bcaf25ae92801dda9bb44a84ae810b05c23ec22aca2b2feac60eeb0b9c81" exitCode=0 Sep 29 14:02:40 crc kubenswrapper[4869]: I0929 14:02:40.191831 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerDied","Data":"7e56bcaf25ae92801dda9bb44a84ae810b05c23ec22aca2b2feac60eeb0b9c81"} Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.185780 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.225783 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" event={"ID":"74c97303-a69c-471b-8b23-4a72ec813beb","Type":"ContainerStarted","Data":"986cf25b2326f69c3ca381493d6e256e0fecbacaeaa1d40aa16fc00927b8b791"} Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.229819 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"799a00e2-c4f4-4905-a130-e0b082840897","Type":"ContainerDied","Data":"6f324e9f2f270cc6ede5de3247bfa91ac132d20ba0959a48ff2d6fc1bba615f9"} Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.229873 4869 scope.go:117] "RemoveContainer" containerID="65b022aea4190cc24e902d8950ab1d9be415d5be9ad25e1662ac51f952edbbaa" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.230035 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.244184 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" podStartSLOduration=2.152223515 podStartE2EDuration="11.244166845s" podCreationTimestamp="2025-09-29 14:02:31 +0000 UTC" firstStartedPulling="2025-09-29 14:02:32.802778063 +0000 UTC m=+1279.243422383" lastFinishedPulling="2025-09-29 14:02:41.894721393 +0000 UTC m=+1288.335365713" observedRunningTime="2025-09-29 14:02:42.24398388 +0000 UTC m=+1288.684628220" watchObservedRunningTime="2025-09-29 14:02:42.244166845 +0000 UTC m=+1288.684811165" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253221 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253321 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srvph\" (UniqueName: \"kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253452 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253500 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253544 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253629 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.253699 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd\") pod \"799a00e2-c4f4-4905-a130-e0b082840897\" (UID: \"799a00e2-c4f4-4905-a130-e0b082840897\") " Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.255439 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.255725 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.256772 4869 scope.go:117] "RemoveContainer" containerID="4cec03146a7c78b50689eff2fd4883aa26a9bef6af8ded896d900517ad397bab" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.259977 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts" (OuterVolumeSpecName: "scripts") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.268971 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph" (OuterVolumeSpecName: "kube-api-access-srvph") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "kube-api-access-srvph". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.280864 4869 scope.go:117] "RemoveContainer" containerID="5e67c73719a3f3b147743a37936560ae5241b754438c6cfa9c6fabc75d18900f" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.285713 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.300384 4869 scope.go:117] "RemoveContainer" containerID="7e56bcaf25ae92801dda9bb44a84ae810b05c23ec22aca2b2feac60eeb0b9c81" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.338010 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356371 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356402 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srvph\" (UniqueName: \"kubernetes.io/projected/799a00e2-c4f4-4905-a130-e0b082840897-kube-api-access-srvph\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356414 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356422 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356431 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.356440 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/799a00e2-c4f4-4905-a130-e0b082840897-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.361030 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data" (OuterVolumeSpecName: "config-data") pod "799a00e2-c4f4-4905-a130-e0b082840897" (UID: "799a00e2-c4f4-4905-a130-e0b082840897"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.458418 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799a00e2-c4f4-4905-a130-e0b082840897-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.581044 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.594894 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.605188 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:42 crc kubenswrapper[4869]: E0929 14:02:42.605742 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="sg-core" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.605766 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="sg-core" Sep 29 14:02:42 crc kubenswrapper[4869]: E0929 14:02:42.605781 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-central-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.605789 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-central-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: E0929 14:02:42.605824 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-notification-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.605833 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-notification-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: E0929 14:02:42.605844 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="proxy-httpd" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.605851 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="proxy-httpd" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.606099 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="proxy-httpd" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.606123 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-notification-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.606140 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="sg-core" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.606160 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="799a00e2-c4f4-4905-a130-e0b082840897" containerName="ceilometer-central-agent" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.617094 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.617208 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.622095 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.622444 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662535 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662572 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662627 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662669 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662757 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662802 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4pgp\" (UniqueName: \"kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.662928 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.764636 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.764687 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.764740 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.764781 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.764918 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.765044 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4pgp\" (UniqueName: \"kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.765106 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.765161 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.765958 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.768410 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.768941 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.769293 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.769347 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.786999 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4pgp\" (UniqueName: \"kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp\") pod \"ceilometer-0\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " pod="openstack/ceilometer-0" Sep 29 14:02:42 crc kubenswrapper[4869]: I0929 14:02:42.932914 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:43 crc kubenswrapper[4869]: I0929 14:02:43.359144 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:44 crc kubenswrapper[4869]: I0929 14:02:44.271391 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="799a00e2-c4f4-4905-a130-e0b082840897" path="/var/lib/kubelet/pods/799a00e2-c4f4-4905-a130-e0b082840897/volumes" Sep 29 14:02:44 crc kubenswrapper[4869]: I0929 14:02:44.280289 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerStarted","Data":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} Sep 29 14:02:44 crc kubenswrapper[4869]: I0929 14:02:44.280334 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerStarted","Data":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} Sep 29 14:02:44 crc kubenswrapper[4869]: I0929 14:02:44.280345 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerStarted","Data":"acc45acb25fb1cc2fec2d1eddd788a5451101645ec93212ed021b2d39bf27aab"} Sep 29 14:02:45 crc kubenswrapper[4869]: I0929 14:02:45.295906 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerStarted","Data":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} Sep 29 14:02:47 crc kubenswrapper[4869]: I0929 14:02:47.316745 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerStarted","Data":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} Sep 29 14:02:47 crc kubenswrapper[4869]: I0929 14:02:47.317346 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:02:47 crc kubenswrapper[4869]: I0929 14:02:47.341718 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.378574881 podStartE2EDuration="5.341701237s" podCreationTimestamp="2025-09-29 14:02:42 +0000 UTC" firstStartedPulling="2025-09-29 14:02:43.371653471 +0000 UTC m=+1289.812297811" lastFinishedPulling="2025-09-29 14:02:46.334779847 +0000 UTC m=+1292.775424167" 
observedRunningTime="2025-09-29 14:02:47.333395231 +0000 UTC m=+1293.774039551" watchObservedRunningTime="2025-09-29 14:02:47.341701237 +0000 UTC m=+1293.782345557" Sep 29 14:02:48 crc kubenswrapper[4869]: I0929 14:02:48.345713 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:49 crc kubenswrapper[4869]: I0929 14:02:49.333803 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-central-agent" containerID="cri-o://2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" gracePeriod=30 Sep 29 14:02:49 crc kubenswrapper[4869]: I0929 14:02:49.333839 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="sg-core" containerID="cri-o://3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" gracePeriod=30 Sep 29 14:02:49 crc kubenswrapper[4869]: I0929 14:02:49.333837 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="proxy-httpd" containerID="cri-o://c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" gracePeriod=30 Sep 29 14:02:49 crc kubenswrapper[4869]: I0929 14:02:49.333882 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-notification-agent" containerID="cri-o://06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" gracePeriod=30 Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.173245 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.327296 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.327576 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.327704 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.327821 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.327932 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4pgp\" (UniqueName: \"kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.328038 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.328387 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd\") pod \"0c136758-01e1-4dc6-ae25-73061febf56d\" (UID: \"0c136758-01e1-4dc6-ae25-73061febf56d\") " Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.328404 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.329818 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.330946 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.333336 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts" (OuterVolumeSpecName: "scripts") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.333582 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp" (OuterVolumeSpecName: "kube-api-access-t4pgp") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "kube-api-access-t4pgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358417 4869 generic.go:334] "Generic (PLEG): container finished" podID="0c136758-01e1-4dc6-ae25-73061febf56d" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" exitCode=0 Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358454 4869 generic.go:334] "Generic (PLEG): container finished" podID="0c136758-01e1-4dc6-ae25-73061febf56d" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" exitCode=2 Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358440 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358483 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerDied","Data":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358500 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358531 4869 scope.go:117] "RemoveContainer" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358517 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerDied","Data":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358466 4869 generic.go:334] "Generic (PLEG): container finished" podID="0c136758-01e1-4dc6-ae25-73061febf56d" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" exitCode=0 Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358644 4869 generic.go:334] "Generic (PLEG): container finished" podID="0c136758-01e1-4dc6-ae25-73061febf56d" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" exitCode=0 Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358850 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerDied","Data":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358901 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerDied","Data":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.358925 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c136758-01e1-4dc6-ae25-73061febf56d","Type":"ContainerDied","Data":"acc45acb25fb1cc2fec2d1eddd788a5451101645ec93212ed021b2d39bf27aab"} Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.387079 4869 scope.go:117] "RemoveContainer" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.408497 4869 scope.go:117] "RemoveContainer" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.424448 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.427897 4869 scope.go:117] "RemoveContainer" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.431859 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c136758-01e1-4dc6-ae25-73061febf56d-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.431886 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.431895 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.431907 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4pgp\" (UniqueName: \"kubernetes.io/projected/0c136758-01e1-4dc6-ae25-73061febf56d-kube-api-access-t4pgp\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.431916 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.437376 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data" (OuterVolumeSpecName: "config-data") pod "0c136758-01e1-4dc6-ae25-73061febf56d" (UID: "0c136758-01e1-4dc6-ae25-73061febf56d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.446910 4869 scope.go:117] "RemoveContainer" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.447364 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": container with ID starting with c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111 not found: ID does not exist" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.447408 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} err="failed to get container status \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": rpc error: code = NotFound desc = could not find container \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": container with ID starting with c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.447437 4869 scope.go:117] "RemoveContainer" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.447744 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": container with ID starting with 3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580 not found: ID does not exist" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.447790 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} err="failed to get container status \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": rpc error: code = NotFound desc = could not find container \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": container with ID starting with 3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.447812 4869 scope.go:117] "RemoveContainer" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.448015 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": container with ID starting with 06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994 not found: ID does not exist" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448062 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} err="failed to get container status \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": rpc error: code = NotFound desc = could not 
find container \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": container with ID starting with 06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448075 4869 scope.go:117] "RemoveContainer" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.448272 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": container with ID starting with 2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6 not found: ID does not exist" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448294 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} err="failed to get container status \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": rpc error: code = NotFound desc = could not find container \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": container with ID starting with 2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448310 4869 scope.go:117] "RemoveContainer" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448592 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} err="failed to get container status \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": rpc error: code = NotFound desc = could not find container \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": container with ID starting with c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448663 4869 scope.go:117] "RemoveContainer" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448822 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} err="failed to get container status \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": rpc error: code = NotFound desc = could not find container \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": container with ID starting with 3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.448843 4869 scope.go:117] "RemoveContainer" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449024 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} err="failed to get container status \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": rpc error: code = NotFound desc = could not 
find container \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": container with ID starting with 06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449039 4869 scope.go:117] "RemoveContainer" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449260 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} err="failed to get container status \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": rpc error: code = NotFound desc = could not find container \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": container with ID starting with 2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449278 4869 scope.go:117] "RemoveContainer" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449444 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} err="failed to get container status \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": rpc error: code = NotFound desc = could not find container \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": container with ID starting with c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449463 4869 scope.go:117] "RemoveContainer" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449672 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} err="failed to get container status \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": rpc error: code = NotFound desc = could not find container \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": container with ID starting with 3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449689 4869 scope.go:117] "RemoveContainer" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449864 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} err="failed to get container status \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": rpc error: code = NotFound desc = could not find container \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": container with ID starting with 06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.449882 4869 scope.go:117] "RemoveContainer" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450052 4869 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} err="failed to get container status \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": rpc error: code = NotFound desc = could not find container \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": container with ID starting with 2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450069 4869 scope.go:117] "RemoveContainer" containerID="c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450221 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111"} err="failed to get container status \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": rpc error: code = NotFound desc = could not find container \"c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111\": container with ID starting with c23bd61d6095f874fbe6a4be4a9bc4a801d072cccb2014951aceb9ba22b5a111 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450237 4869 scope.go:117] "RemoveContainer" containerID="3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450379 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580"} err="failed to get container status \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": rpc error: code = NotFound desc = could not find container \"3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580\": container with ID starting with 3e91dc7388dce071134c15007b4031e4d68dc1d810f6c4a55ebd67c75a37e580 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450394 4869 scope.go:117] "RemoveContainer" containerID="06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450738 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994"} err="failed to get container status \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": rpc error: code = NotFound desc = could not find container \"06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994\": container with ID starting with 06a11beced9f2714e872025d76404c263bf00e1211a63a88a77effa5fc244994 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450756 4869 scope.go:117] "RemoveContainer" containerID="2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.450936 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6"} err="failed to get container status \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": rpc error: code = NotFound desc = could not find container \"2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6\": container with ID starting with 
2106f09f67f831f054270097a4be798ce041ed9c735592f77ba751133a5462b6 not found: ID does not exist" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.534189 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c136758-01e1-4dc6-ae25-73061febf56d-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.656830 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.656884 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.717180 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.732712 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.745248 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.746989 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="sg-core" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.747112 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="sg-core" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.747201 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-central-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.747282 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-central-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.747387 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-notification-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.747474 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-notification-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: E0929 14:02:50.747575 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="proxy-httpd" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.747673 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="proxy-httpd" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.748018 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="proxy-httpd" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.748110 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" 
containerName="ceilometer-central-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.748222 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="sg-core" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.748296 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" containerName="ceilometer-notification-agent" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.750985 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.753337 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.753568 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.763908 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841226 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841311 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841451 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841629 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841698 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841740 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.841778 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twq7k\" (UniqueName: 
\"kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943550 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943646 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943736 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943790 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943839 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943868 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twq7k\" (UniqueName: \"kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.943934 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.944422 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.945013 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.949337 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.949466 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.950380 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.950923 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:50 crc kubenswrapper[4869]: I0929 14:02:50.962083 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twq7k\" (UniqueName: \"kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k\") pod \"ceilometer-0\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " pod="openstack/ceilometer-0" Sep 29 14:02:51 crc kubenswrapper[4869]: I0929 14:02:51.070523 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:02:51 crc kubenswrapper[4869]: I0929 14:02:51.565791 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:02:52 crc kubenswrapper[4869]: I0929 14:02:52.254124 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c136758-01e1-4dc6-ae25-73061febf56d" path="/var/lib/kubelet/pods/0c136758-01e1-4dc6-ae25-73061febf56d/volumes" Sep 29 14:02:52 crc kubenswrapper[4869]: I0929 14:02:52.384109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerStarted","Data":"5c0b3ef84a840dd2dae688b570ada88f5bd503184ded4860a9b13443e37eb5d5"} Sep 29 14:02:52 crc kubenswrapper[4869]: I0929 14:02:52.384162 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerStarted","Data":"e1ee95a0d04c519ff7873345dfbf3fdac9c292c76ac1ec3969781c3b3b9b854b"} Sep 29 14:02:52 crc kubenswrapper[4869]: I0929 14:02:52.384175 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerStarted","Data":"443670188e48ad3a8b3b0a78287f38dfe1a9f9198aec0d2adc3a66b71a7c5a33"} Sep 29 14:02:53 crc kubenswrapper[4869]: I0929 14:02:53.400003 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerStarted","Data":"d552eb73b06a36af012935127267f7410428af2dc0d40ef1ef070896a397c38e"} Sep 29 14:02:54 crc kubenswrapper[4869]: I0929 14:02:54.411336 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerStarted","Data":"1c069ad254a4c8fca1bb8b69b5c8726f0e006ff55992e6b97339b29ae8b8d461"} Sep 29 14:02:54 crc kubenswrapper[4869]: I0929 14:02:54.411597 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Sep 29 14:02:54 crc kubenswrapper[4869]: I0929 14:02:54.438240 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.991363728 podStartE2EDuration="4.438221465s" podCreationTimestamp="2025-09-29 14:02:50 +0000 UTC" firstStartedPulling="2025-09-29 14:02:51.570495237 +0000 UTC m=+1298.011139557" lastFinishedPulling="2025-09-29 14:02:54.017352964 +0000 UTC m=+1300.457997294" observedRunningTime="2025-09-29 14:02:54.428445481 +0000 UTC m=+1300.869089811" watchObservedRunningTime="2025-09-29 14:02:54.438221465 +0000 UTC m=+1300.878865785" Sep 29 14:02:55 crc kubenswrapper[4869]: I0929 14:02:55.425830 4869 generic.go:334] "Generic (PLEG): container finished" podID="74c97303-a69c-471b-8b23-4a72ec813beb" containerID="986cf25b2326f69c3ca381493d6e256e0fecbacaeaa1d40aa16fc00927b8b791" exitCode=0 Sep 29 14:02:55 crc kubenswrapper[4869]: I0929 14:02:55.426030 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" event={"ID":"74c97303-a69c-471b-8b23-4a72ec813beb","Type":"ContainerDied","Data":"986cf25b2326f69c3ca381493d6e256e0fecbacaeaa1d40aa16fc00927b8b791"} Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.813961 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.961686 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data\") pod \"74c97303-a69c-471b-8b23-4a72ec813beb\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.961758 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgv9c\" (UniqueName: \"kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c\") pod \"74c97303-a69c-471b-8b23-4a72ec813beb\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.961787 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts\") pod \"74c97303-a69c-471b-8b23-4a72ec813beb\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.961986 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle\") pod \"74c97303-a69c-471b-8b23-4a72ec813beb\" (UID: \"74c97303-a69c-471b-8b23-4a72ec813beb\") " Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.967442 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts" (OuterVolumeSpecName: "scripts") pod "74c97303-a69c-471b-8b23-4a72ec813beb" (UID: "74c97303-a69c-471b-8b23-4a72ec813beb"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.967564 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c" (OuterVolumeSpecName: "kube-api-access-vgv9c") pod "74c97303-a69c-471b-8b23-4a72ec813beb" (UID: "74c97303-a69c-471b-8b23-4a72ec813beb"). InnerVolumeSpecName "kube-api-access-vgv9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:02:56 crc kubenswrapper[4869]: I0929 14:02:56.995259 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74c97303-a69c-471b-8b23-4a72ec813beb" (UID: "74c97303-a69c-471b-8b23-4a72ec813beb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.009837 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data" (OuterVolumeSpecName: "config-data") pod "74c97303-a69c-471b-8b23-4a72ec813beb" (UID: "74c97303-a69c-471b-8b23-4a72ec813beb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.064177 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.064222 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.064235 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgv9c\" (UniqueName: \"kubernetes.io/projected/74c97303-a69c-471b-8b23-4a72ec813beb-kube-api-access-vgv9c\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.064252 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c97303-a69c-471b-8b23-4a72ec813beb-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.450533 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" event={"ID":"74c97303-a69c-471b-8b23-4a72ec813beb","Type":"ContainerDied","Data":"1be4d4b394a6f8e04de85379bb7bd863b8a19b23f4a74e98c8c96770bdd3a751"} Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.450826 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1be4d4b394a6f8e04de85379bb7bd863b8a19b23f4a74e98c8c96770bdd3a751" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.450881 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9fqpp" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.591126 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Sep 29 14:02:57 crc kubenswrapper[4869]: E0929 14:02:57.591795 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74c97303-a69c-471b-8b23-4a72ec813beb" containerName="nova-cell0-conductor-db-sync" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.591824 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="74c97303-a69c-471b-8b23-4a72ec813beb" containerName="nova-cell0-conductor-db-sync" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.592105 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="74c97303-a69c-471b-8b23-4a72ec813beb" containerName="nova-cell0-conductor-db-sync" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.593103 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.596040 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vkql4" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.596956 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.607029 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.676346 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.676442 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm6np\" (UniqueName: \"kubernetes.io/projected/f980558d-d08e-40fc-a8f7-88a3b88f2b56-kube-api-access-bm6np\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.676565 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.778751 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.778832 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm6np\" (UniqueName: \"kubernetes.io/projected/f980558d-d08e-40fc-a8f7-88a3b88f2b56-kube-api-access-bm6np\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: 
I0929 14:02:57.778908 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.782802 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.789251 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f980558d-d08e-40fc-a8f7-88a3b88f2b56-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.808705 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm6np\" (UniqueName: \"kubernetes.io/projected/f980558d-d08e-40fc-a8f7-88a3b88f2b56-kube-api-access-bm6np\") pod \"nova-cell0-conductor-0\" (UID: \"f980558d-d08e-40fc-a8f7-88a3b88f2b56\") " pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:57 crc kubenswrapper[4869]: I0929 14:02:57.912108 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:58 crc kubenswrapper[4869]: I0929 14:02:58.344532 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Sep 29 14:02:58 crc kubenswrapper[4869]: W0929 14:02:58.347753 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf980558d_d08e_40fc_a8f7_88a3b88f2b56.slice/crio-df517888cbfe5bbba197b16420ff788e714b1fe99a9661ffbbce37762273099f WatchSource:0}: Error finding container df517888cbfe5bbba197b16420ff788e714b1fe99a9661ffbbce37762273099f: Status 404 returned error can't find the container with id df517888cbfe5bbba197b16420ff788e714b1fe99a9661ffbbce37762273099f Sep 29 14:02:58 crc kubenswrapper[4869]: I0929 14:02:58.461597 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f980558d-d08e-40fc-a8f7-88a3b88f2b56","Type":"ContainerStarted","Data":"df517888cbfe5bbba197b16420ff788e714b1fe99a9661ffbbce37762273099f"} Sep 29 14:02:59 crc kubenswrapper[4869]: I0929 14:02:59.476377 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f980558d-d08e-40fc-a8f7-88a3b88f2b56","Type":"ContainerStarted","Data":"84f445c8adc6cc21f79fb3d6154eda03aafd256797255575c1be1cf057933385"} Sep 29 14:02:59 crc kubenswrapper[4869]: I0929 14:02:59.477836 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Sep 29 14:02:59 crc kubenswrapper[4869]: I0929 14:02:59.512545 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.512519834 podStartE2EDuration="2.512519834s" podCreationTimestamp="2025-09-29 14:02:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 
14:02:59.502007981 +0000 UTC m=+1305.942652311" watchObservedRunningTime="2025-09-29 14:02:59.512519834 +0000 UTC m=+1305.953164184" Sep 29 14:03:07 crc kubenswrapper[4869]: I0929 14:03:07.938740 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.514394 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-6xgt7"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.519471 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.530084 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.530711 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.539200 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-6xgt7"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.607466 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.607548 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg6wz\" (UniqueName: \"kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.607572 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.607670 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.656952 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.659339 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.665343 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.679572 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.693357 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.696780 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.706346 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.711376 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.711872 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.711951 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg6wz\" (UniqueName: \"kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.711979 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.725744 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.731885 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.737677 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.739367 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.744874 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.747438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg6wz\" (UniqueName: \"kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.757383 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-6xgt7\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.760641 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.813665 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh5tm\" (UniqueName: \"kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.813787 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.813818 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.813979 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.814042 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.814201 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.814483 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2lwx\" (UniqueName: \"kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.848599 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.862425 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.888995 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.890859 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.900040 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917483 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917653 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917699 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917730 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917758 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbvzr\" (UniqueName: \"kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917887 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917930 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.917985 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.918412 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.918061 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.918822 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2lwx\" (UniqueName: \"kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.918878 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh5tm\" (UniqueName: \"kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.927702 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.929715 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.950026 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.963835 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.966066 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.967403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.984388 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh5tm\" (UniqueName: \"kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm\") pod \"nova-scheduler-0\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.989147 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:08 crc kubenswrapper[4869]: I0929 14:03:08.992240 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2lwx\" (UniqueName: \"kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx\") pod \"nova-api-0\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " pod="openstack/nova-api-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.011924 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"] Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.020729 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.020839 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rhhm\" (UniqueName: \"kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.020916 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.020974 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.021000 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbvzr\" (UniqueName: 
\"kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.021036 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.021075 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.022386 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.025653 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.027094 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.038438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.042842 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbvzr\" (UniqueName: \"kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr\") pod \"nova-metadata-0\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.141825 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142218 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142660 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142702 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142771 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rhhm\" (UniqueName: \"kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142920 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142954 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.142976 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x9r2\" (UniqueName: \"kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.143037 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.151097 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.166450 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " 
pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.181806 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rhhm\" (UniqueName: \"kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.245178 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.245226 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x9r2\" (UniqueName: \"kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.245296 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.245322 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.245396 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.246403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.246918 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.246967 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.247148 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.266398 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x9r2\" (UniqueName: \"kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2\") pod \"dnsmasq-dns-7d758d5cd9-c8rjg\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") " pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.288919 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.356488 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.372541 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.528568 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-6xgt7"] Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.638284 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-6xgt7" event={"ID":"d9d39658-a34b-4541-be56-d2a215fa0c00","Type":"ContainerStarted","Data":"266cc69e5999496367968e916e4e7d8ad28f05fa5f23fd344b1bb22c57114bd6"} Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.854779 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:09 crc kubenswrapper[4869]: W0929 14:03:09.865810 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb19620a_ec4d_4101_b1e6_b65d6c89d3b8.slice/crio-ed246e066b606305aa96ffee29e38ef5da7eb62d547049b4c3618bfcee521b54 WatchSource:0}: Error finding container ed246e066b606305aa96ffee29e38ef5da7eb62d547049b4c3618bfcee521b54: Status 404 returned error can't find the container with id ed246e066b606305aa96ffee29e38ef5da7eb62d547049b4c3618bfcee521b54 Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.877086 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:03:09 crc kubenswrapper[4869]: I0929 14:03:09.883272 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.004452 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8vrtz"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.015075 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.017724 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.017936 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.020736 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8vrtz"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.069176 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.088856 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6mh9\" (UniqueName: \"kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.088900 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.089039 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.089274 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.190019 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.190081 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.190139 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6mh9\" (UniqueName: \"kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: 
\"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.190160 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.203831 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.204006 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.204372 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.223189 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6mh9\" (UniqueName: \"kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9\") pod \"nova-cell1-conductor-db-sync-8vrtz\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.280893 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.290265 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"] Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.344527 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.661571 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185","Type":"ContainerStarted","Data":"2629e6b60283fe535dfe715a667b6f9b0d7e9f6aed0f2211e5100eceda682cd3"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.664572 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8","Type":"ContainerStarted","Data":"ed246e066b606305aa96ffee29e38ef5da7eb62d547049b4c3618bfcee521b54"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.680333 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-6xgt7" event={"ID":"d9d39658-a34b-4541-be56-d2a215fa0c00","Type":"ContainerStarted","Data":"e07c85a0625b72e0247c8ef94f874760bd643b375f9e0ad2b0ec0becb1f875b2"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.684870 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerStarted","Data":"9c21c51dd4aaca6eb0e400f0cd42ef51e11a88e775befa96508b4d1441daa117"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.688586 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerStarted","Data":"9656024665d9d0770287ce4d5fbe7f2c430c3dedaf58753dcf580f155e18b28c"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.692753 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerStarted","Data":"c84ece203afed896ae05e8d5449ce5169a27425e79c5eb4cffb5d0be3185b06d"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.692873 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerStarted","Data":"74f357227b16e3d639653286005deff35af2de05656501d7d608d17b2eedf5d2"} Sep 29 14:03:10 crc kubenswrapper[4869]: I0929 14:03:10.710288 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-6xgt7" podStartSLOduration=2.710264977 podStartE2EDuration="2.710264977s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:10.705014061 +0000 UTC m=+1317.145658401" watchObservedRunningTime="2025-09-29 14:03:10.710264977 +0000 UTC m=+1317.150909297" Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.036485 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8vrtz"] Sep 29 14:03:11 crc kubenswrapper[4869]: W0929 14:03:11.088668 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50306168_e24b_46f0_805b_6e703ff45a13.slice/crio-3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95 WatchSource:0}: Error finding container 3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95: Status 404 returned error can't find the container with id 3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95 Sep 29 14:03:11 crc 
kubenswrapper[4869]: I0929 14:03:11.740357 4869 generic.go:334] "Generic (PLEG): container finished" podID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerID="c84ece203afed896ae05e8d5449ce5169a27425e79c5eb4cffb5d0be3185b06d" exitCode=0 Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.740664 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerDied","Data":"c84ece203afed896ae05e8d5449ce5169a27425e79c5eb4cffb5d0be3185b06d"} Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.740693 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerStarted","Data":"20bb7d3bc8e87e2f7be0f31ddc6fe5c244a1b0fe7bda35abbdf4710bd3dd7260"} Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.741309 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.755150 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" event={"ID":"50306168-e24b-46f0-805b-6e703ff45a13","Type":"ContainerStarted","Data":"021a7036073c25eb2fe3e6e820153baa172a60756451645a5308c7f9903f2f19"} Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.755211 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" event={"ID":"50306168-e24b-46f0-805b-6e703ff45a13","Type":"ContainerStarted","Data":"3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95"} Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.766527 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" podStartSLOduration=3.766506385 podStartE2EDuration="3.766506385s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:11.763155328 +0000 UTC m=+1318.203799658" watchObservedRunningTime="2025-09-29 14:03:11.766506385 +0000 UTC m=+1318.207150705" Sep 29 14:03:11 crc kubenswrapper[4869]: I0929 14:03:11.797315 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" podStartSLOduration=2.797293406 podStartE2EDuration="2.797293406s" podCreationTimestamp="2025-09-29 14:03:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:11.778262981 +0000 UTC m=+1318.218907301" watchObservedRunningTime="2025-09-29 14:03:11.797293406 +0000 UTC m=+1318.237937726" Sep 29 14:03:12 crc kubenswrapper[4869]: I0929 14:03:12.595494 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:12 crc kubenswrapper[4869]: I0929 14:03:12.624946 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.777164 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185","Type":"ContainerStarted","Data":"5a9ab96f48ed94ad601cb05442902b25006ce1b9d67c996493c7cc5a97e7ec5d"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.777357 4869 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5a9ab96f48ed94ad601cb05442902b25006ce1b9d67c996493c7cc5a97e7ec5d" gracePeriod=30 Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.779278 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8","Type":"ContainerStarted","Data":"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.785570 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerStarted","Data":"f9e467293f90f7a8bf950df6f03ce7a89ee70b8858330a15f3e666a69f78e93d"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.785631 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerStarted","Data":"173bb7f07d253fd3651bbb0a94ba773813e9f2459e632842c4eda9b6c9e98cbb"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.785949 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-log" containerID="cri-o://173bb7f07d253fd3651bbb0a94ba773813e9f2459e632842c4eda9b6c9e98cbb" gracePeriod=30 Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.786169 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-metadata" containerID="cri-o://f9e467293f90f7a8bf950df6f03ce7a89ee70b8858330a15f3e666a69f78e93d" gracePeriod=30 Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.788034 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerStarted","Data":"e0370dff21283aec365991175f8bbfe8e4494b744c29f5ef51181dc7c6822fd7"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.788159 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerStarted","Data":"dee9d1ab559bda72352b92cd23cda8112578ed07c9c1f68a85885278a190f8df"} Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.822382 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.577241999 podStartE2EDuration="5.822367178s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="2025-09-29 14:03:09.876869252 +0000 UTC m=+1316.317513572" lastFinishedPulling="2025-09-29 14:03:13.121994431 +0000 UTC m=+1319.562638751" observedRunningTime="2025-09-29 14:03:13.820595662 +0000 UTC m=+1320.261239982" watchObservedRunningTime="2025-09-29 14:03:13.822367178 +0000 UTC m=+1320.263011498" Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.828572 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.9806688770000003 podStartE2EDuration="5.828554509s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="2025-09-29 14:03:10.296959513 +0000 UTC m=+1316.737603833" lastFinishedPulling="2025-09-29 14:03:13.144845145 +0000 UTC m=+1319.585489465" 
observedRunningTime="2025-09-29 14:03:13.803705123 +0000 UTC m=+1320.244349443" watchObservedRunningTime="2025-09-29 14:03:13.828554509 +0000 UTC m=+1320.269198819" Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.841913 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.78501245 podStartE2EDuration="5.841888905s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="2025-09-29 14:03:10.104089449 +0000 UTC m=+1316.544733769" lastFinishedPulling="2025-09-29 14:03:13.160965884 +0000 UTC m=+1319.601610224" observedRunningTime="2025-09-29 14:03:13.839932835 +0000 UTC m=+1320.280577165" watchObservedRunningTime="2025-09-29 14:03:13.841888905 +0000 UTC m=+1320.282533245" Sep 29 14:03:13 crc kubenswrapper[4869]: I0929 14:03:13.864108 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.60422225 podStartE2EDuration="5.864083682s" podCreationTimestamp="2025-09-29 14:03:08 +0000 UTC" firstStartedPulling="2025-09-29 14:03:09.881511903 +0000 UTC m=+1316.322156213" lastFinishedPulling="2025-09-29 14:03:13.141373305 +0000 UTC m=+1319.582017645" observedRunningTime="2025-09-29 14:03:13.855587472 +0000 UTC m=+1320.296231822" watchObservedRunningTime="2025-09-29 14:03:13.864083682 +0000 UTC m=+1320.304728002" Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.026859 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.142990 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.143049 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.359186 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.798236 4869 generic.go:334] "Generic (PLEG): container finished" podID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerID="173bb7f07d253fd3651bbb0a94ba773813e9f2459e632842c4eda9b6c9e98cbb" exitCode=143 Sep 29 14:03:14 crc kubenswrapper[4869]: I0929 14:03:14.798309 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerDied","Data":"173bb7f07d253fd3651bbb0a94ba773813e9f2459e632842c4eda9b6c9e98cbb"} Sep 29 14:03:18 crc kubenswrapper[4869]: I0929 14:03:18.839295 4869 generic.go:334] "Generic (PLEG): container finished" podID="d9d39658-a34b-4541-be56-d2a215fa0c00" containerID="e07c85a0625b72e0247c8ef94f874760bd643b375f9e0ad2b0ec0becb1f875b2" exitCode=0 Sep 29 14:03:18 crc kubenswrapper[4869]: I0929 14:03:18.839340 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-6xgt7" event={"ID":"d9d39658-a34b-4541-be56-d2a215fa0c00","Type":"ContainerDied","Data":"e07c85a0625b72e0247c8ef94f874760bd643b375f9e0ad2b0ec0becb1f875b2"} Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.027160 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.053037 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Sep 29 14:03:19 crc 
kubenswrapper[4869]: I0929 14:03:19.289650 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.289713 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.376049 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.458734 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.459006 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="dnsmasq-dns" containerID="cri-o://66d8b81db92b3b130128f4e90cf5e654e63f37a0c3163a15d52abc5c1358f253" gracePeriod=10 Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.855038 4869 generic.go:334] "Generic (PLEG): container finished" podID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerID="66d8b81db92b3b130128f4e90cf5e654e63f37a0c3163a15d52abc5c1358f253" exitCode=0 Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.856585 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" event={"ID":"9f660561-4e72-4887-a736-ba1d7c48ba95","Type":"ContainerDied","Data":"66d8b81db92b3b130128f4e90cf5e654e63f37a0c3163a15d52abc5c1358f253"} Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.862858 4869 generic.go:334] "Generic (PLEG): container finished" podID="50306168-e24b-46f0-805b-6e703ff45a13" containerID="021a7036073c25eb2fe3e6e820153baa172a60756451645a5308c7f9903f2f19" exitCode=0 Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.863335 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" event={"ID":"50306168-e24b-46f0-805b-6e703ff45a13","Type":"ContainerDied","Data":"021a7036073c25eb2fe3e6e820153baa172a60756451645a5308c7f9903f2f19"} Sep 29 14:03:19 crc kubenswrapper[4869]: I0929 14:03:19.911177 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.027391 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.173153 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5g4x\" (UniqueName: \"kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x\") pod \"9f660561-4e72-4887-a736-ba1d7c48ba95\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.173276 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc\") pod \"9f660561-4e72-4887-a736-ba1d7c48ba95\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.173371 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config\") pod \"9f660561-4e72-4887-a736-ba1d7c48ba95\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.173460 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb\") pod \"9f660561-4e72-4887-a736-ba1d7c48ba95\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.173491 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb\") pod \"9f660561-4e72-4887-a736-ba1d7c48ba95\" (UID: \"9f660561-4e72-4887-a736-ba1d7c48ba95\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.186312 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x" (OuterVolumeSpecName: "kube-api-access-j5g4x") pod "9f660561-4e72-4887-a736-ba1d7c48ba95" (UID: "9f660561-4e72-4887-a736-ba1d7c48ba95"). InnerVolumeSpecName "kube-api-access-j5g4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.227817 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config" (OuterVolumeSpecName: "config") pod "9f660561-4e72-4887-a736-ba1d7c48ba95" (UID: "9f660561-4e72-4887-a736-ba1d7c48ba95"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.236314 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9f660561-4e72-4887-a736-ba1d7c48ba95" (UID: "9f660561-4e72-4887-a736-ba1d7c48ba95"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.249062 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9f660561-4e72-4887-a736-ba1d7c48ba95" (UID: "9f660561-4e72-4887-a736-ba1d7c48ba95"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.257125 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9f660561-4e72-4887-a736-ba1d7c48ba95" (UID: "9f660561-4e72-4887-a736-ba1d7c48ba95"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.278819 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5g4x\" (UniqueName: \"kubernetes.io/projected/9f660561-4e72-4887-a736-ba1d7c48ba95-kube-api-access-j5g4x\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.278850 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.278862 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.278871 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.278880 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f660561-4e72-4887-a736-ba1d7c48ba95-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.291412 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.372812 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.372914 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.482051 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts\") pod \"d9d39658-a34b-4541-be56-d2a215fa0c00\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.482120 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg6wz\" (UniqueName: \"kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz\") pod \"d9d39658-a34b-4541-be56-d2a215fa0c00\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.482235 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle\") pod \"d9d39658-a34b-4541-be56-d2a215fa0c00\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.482255 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data\") pod \"d9d39658-a34b-4541-be56-d2a215fa0c00\" (UID: \"d9d39658-a34b-4541-be56-d2a215fa0c00\") " Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.487639 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz" (OuterVolumeSpecName: "kube-api-access-wg6wz") pod "d9d39658-a34b-4541-be56-d2a215fa0c00" (UID: "d9d39658-a34b-4541-be56-d2a215fa0c00"). InnerVolumeSpecName "kube-api-access-wg6wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.487651 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts" (OuterVolumeSpecName: "scripts") pod "d9d39658-a34b-4541-be56-d2a215fa0c00" (UID: "d9d39658-a34b-4541-be56-d2a215fa0c00"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.516544 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9d39658-a34b-4541-be56-d2a215fa0c00" (UID: "d9d39658-a34b-4541-be56-d2a215fa0c00"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.536271 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data" (OuterVolumeSpecName: "config-data") pod "d9d39658-a34b-4541-be56-d2a215fa0c00" (UID: "d9d39658-a34b-4541-be56-d2a215fa0c00"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.586504 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg6wz\" (UniqueName: \"kubernetes.io/projected/d9d39658-a34b-4541-be56-d2a215fa0c00-kube-api-access-wg6wz\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.586552 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.586566 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.586578 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9d39658-a34b-4541-be56-d2a215fa0c00-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.657269 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.657322 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.883315 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-6xgt7" event={"ID":"d9d39658-a34b-4541-be56-d2a215fa0c00","Type":"ContainerDied","Data":"266cc69e5999496367968e916e4e7d8ad28f05fa5f23fd344b1bb22c57114bd6"} Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.883382 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="266cc69e5999496367968e916e4e7d8ad28f05fa5f23fd344b1bb22c57114bd6" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.884473 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-6xgt7" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.904661 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.907846 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f66b6f785-jzv4c" event={"ID":"9f660561-4e72-4887-a736-ba1d7c48ba95","Type":"ContainerDied","Data":"e4180c10400810e53e3b57c4efd404463e76364fe12704f970b354cdcf4b82c7"} Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.907904 4869 scope.go:117] "RemoveContainer" containerID="66d8b81db92b3b130128f4e90cf5e654e63f37a0c3163a15d52abc5c1358f253" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.943664 4869 scope.go:117] "RemoveContainer" containerID="ae15503fdbbba09ac8036da2ebc5c3d959cfec382af40af75ac7790288ddda82" Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.953924 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:03:20 crc kubenswrapper[4869]: I0929 14:03:20.968380 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f66b6f785-jzv4c"] Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.052517 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.052769 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-log" containerID="cri-o://dee9d1ab559bda72352b92cd23cda8112578ed07c9c1f68a85885278a190f8df" gracePeriod=30 Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.052913 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-api" containerID="cri-o://e0370dff21283aec365991175f8bbfe8e4494b744c29f5ef51181dc7c6822fd7" gracePeriod=30 Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.079990 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.084194 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.496566 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.621183 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data\") pod \"50306168-e24b-46f0-805b-6e703ff45a13\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.621265 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts\") pod \"50306168-e24b-46f0-805b-6e703ff45a13\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.621377 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6mh9\" (UniqueName: \"kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9\") pod \"50306168-e24b-46f0-805b-6e703ff45a13\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.621429 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle\") pod \"50306168-e24b-46f0-805b-6e703ff45a13\" (UID: \"50306168-e24b-46f0-805b-6e703ff45a13\") " Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.630270 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9" (OuterVolumeSpecName: "kube-api-access-s6mh9") pod "50306168-e24b-46f0-805b-6e703ff45a13" (UID: "50306168-e24b-46f0-805b-6e703ff45a13"). InnerVolumeSpecName "kube-api-access-s6mh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.636712 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts" (OuterVolumeSpecName: "scripts") pod "50306168-e24b-46f0-805b-6e703ff45a13" (UID: "50306168-e24b-46f0-805b-6e703ff45a13"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.662720 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data" (OuterVolumeSpecName: "config-data") pod "50306168-e24b-46f0-805b-6e703ff45a13" (UID: "50306168-e24b-46f0-805b-6e703ff45a13"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.670817 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50306168-e24b-46f0-805b-6e703ff45a13" (UID: "50306168-e24b-46f0-805b-6e703ff45a13"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.723821 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.724113 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.724123 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6mh9\" (UniqueName: \"kubernetes.io/projected/50306168-e24b-46f0-805b-6e703ff45a13-kube-api-access-s6mh9\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.724133 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50306168-e24b-46f0-805b-6e703ff45a13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.973921 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" event={"ID":"50306168-e24b-46f0-805b-6e703ff45a13","Type":"ContainerDied","Data":"3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95"} Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.973964 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3989ca18dd76177740bd42c6eb736757f39991a69e1d7d1dbdf942e417d06b95" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.974032 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8vrtz" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.979771 4869 generic.go:334] "Generic (PLEG): container finished" podID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerID="dee9d1ab559bda72352b92cd23cda8112578ed07c9c1f68a85885278a190f8df" exitCode=143 Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.979994 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" containerName="nova-scheduler-scheduler" containerID="cri-o://cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" gracePeriod=30 Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.980356 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerDied","Data":"dee9d1ab559bda72352b92cd23cda8112578ed07c9c1f68a85885278a190f8df"} Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.985961 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Sep 29 14:03:21 crc kubenswrapper[4869]: E0929 14:03:21.986515 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="dnsmasq-dns" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986535 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="dnsmasq-dns" Sep 29 14:03:21 crc kubenswrapper[4869]: E0929 14:03:21.986570 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="init" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 
14:03:21.986578 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="init" Sep 29 14:03:21 crc kubenswrapper[4869]: E0929 14:03:21.986596 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9d39658-a34b-4541-be56-d2a215fa0c00" containerName="nova-manage" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986603 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9d39658-a34b-4541-be56-d2a215fa0c00" containerName="nova-manage" Sep 29 14:03:21 crc kubenswrapper[4869]: E0929 14:03:21.986643 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50306168-e24b-46f0-805b-6e703ff45a13" containerName="nova-cell1-conductor-db-sync" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986650 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="50306168-e24b-46f0-805b-6e703ff45a13" containerName="nova-cell1-conductor-db-sync" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986911 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" containerName="dnsmasq-dns" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986940 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9d39658-a34b-4541-be56-d2a215fa0c00" containerName="nova-manage" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.986954 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="50306168-e24b-46f0-805b-6e703ff45a13" containerName="nova-cell1-conductor-db-sync" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.989913 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:21 crc kubenswrapper[4869]: I0929 14:03:21.993948 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:21.999013 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.043299 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.043437 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6xdw\" (UniqueName: \"kubernetes.io/projected/910de1ee-2bba-48cc-bd43-235cec60e09d-kube-api-access-z6xdw\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.043477 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.144966 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6xdw\" (UniqueName: \"kubernetes.io/projected/910de1ee-2bba-48cc-bd43-235cec60e09d-kube-api-access-z6xdw\") pod 
\"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.145031 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.145062 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.149335 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.149699 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910de1ee-2bba-48cc-bd43-235cec60e09d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.170310 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6xdw\" (UniqueName: \"kubernetes.io/projected/910de1ee-2bba-48cc-bd43-235cec60e09d-kube-api-access-z6xdw\") pod \"nova-cell1-conductor-0\" (UID: \"910de1ee-2bba-48cc-bd43-235cec60e09d\") " pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.254778 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f660561-4e72-4887-a736-ba1d7c48ba95" path="/var/lib/kubelet/pods/9f660561-4e72-4887-a736-ba1d7c48ba95/volumes" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.334800 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.799511 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Sep 29 14:03:22 crc kubenswrapper[4869]: I0929 14:03:22.988654 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"910de1ee-2bba-48cc-bd43-235cec60e09d","Type":"ContainerStarted","Data":"0e1e632589346eaa6eda536f2ea3cf33bcc53d99e09204fe16ad9efa4a244e25"} Sep 29 14:03:23 crc kubenswrapper[4869]: I0929 14:03:23.999034 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"910de1ee-2bba-48cc-bd43-235cec60e09d","Type":"ContainerStarted","Data":"847784d1a8795756f70b3d6d0f69c8dee8ee82fd769455e7f956e3741a89be84"} Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:23.999453 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.015349 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.01533189 podStartE2EDuration="3.01533189s" podCreationTimestamp="2025-09-29 14:03:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:24.013579385 +0000 UTC m=+1330.454223705" watchObservedRunningTime="2025-09-29 14:03:24.01533189 +0000 UTC m=+1330.455976210" Sep 29 14:03:24 crc kubenswrapper[4869]: E0929 14:03:24.029373 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Sep 29 14:03:24 crc kubenswrapper[4869]: E0929 14:03:24.031709 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Sep 29 14:03:24 crc kubenswrapper[4869]: E0929 14:03:24.033153 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Sep 29 14:03:24 crc kubenswrapper[4869]: E0929 14:03:24.033230 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" containerName="nova-scheduler-scheduler" Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.130456 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.130908 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="39fc25b3-44e9-413a-a50b-71655cb60e49" containerName="kube-state-metrics" 
containerID="cri-o://b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5" gracePeriod=30 Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.638776 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.705397 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn9jk\" (UniqueName: \"kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk\") pod \"39fc25b3-44e9-413a-a50b-71655cb60e49\" (UID: \"39fc25b3-44e9-413a-a50b-71655cb60e49\") " Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.729383 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk" (OuterVolumeSpecName: "kube-api-access-wn9jk") pod "39fc25b3-44e9-413a-a50b-71655cb60e49" (UID: "39fc25b3-44e9-413a-a50b-71655cb60e49"). InnerVolumeSpecName "kube-api-access-wn9jk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:24 crc kubenswrapper[4869]: I0929 14:03:24.807893 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn9jk\" (UniqueName: \"kubernetes.io/projected/39fc25b3-44e9-413a-a50b-71655cb60e49-kube-api-access-wn9jk\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.011774 4869 generic.go:334] "Generic (PLEG): container finished" podID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerID="e0370dff21283aec365991175f8bbfe8e4494b744c29f5ef51181dc7c6822fd7" exitCode=0 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.012849 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerDied","Data":"e0370dff21283aec365991175f8bbfe8e4494b744c29f5ef51181dc7c6822fd7"} Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.016116 4869 generic.go:334] "Generic (PLEG): container finished" podID="39fc25b3-44e9-413a-a50b-71655cb60e49" containerID="b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5" exitCode=2 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.017236 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.021208 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"39fc25b3-44e9-413a-a50b-71655cb60e49","Type":"ContainerDied","Data":"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5"} Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.021375 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"39fc25b3-44e9-413a-a50b-71655cb60e49","Type":"ContainerDied","Data":"b349b7001523dfcec6a96cd078f1040c0101793ebb6454ddd29c6d4d56224753"} Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.021470 4869 scope.go:117] "RemoveContainer" containerID="b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.077301 4869 scope.go:117] "RemoveContainer" containerID="b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5" Sep 29 14:03:25 crc kubenswrapper[4869]: E0929 14:03:25.079569 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5\": container with ID starting with b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5 not found: ID does not exist" containerID="b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.079623 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5"} err="failed to get container status \"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5\": rpc error: code = NotFound desc = could not find container \"b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5\": container with ID starting with b01165d660e7c9cd79941d7421bab7be7a81711cccc835a326690d21bb2741f5 not found: ID does not exist" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.088896 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.132579 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.155959 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:25 crc kubenswrapper[4869]: E0929 14:03:25.156993 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fc25b3-44e9-413a-a50b-71655cb60e49" containerName="kube-state-metrics" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.157011 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="39fc25b3-44e9-413a-a50b-71655cb60e49" containerName="kube-state-metrics" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.161259 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="39fc25b3-44e9-413a-a50b-71655cb60e49" containerName="kube-state-metrics" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.162896 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.168440 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.169448 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.211844 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.215233 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.215873 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.215928 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.215996 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlvkc\" (UniqueName: \"kubernetes.io/projected/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-api-access-wlvkc\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.317771 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.317804 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.317847 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlvkc\" (UniqueName: \"kubernetes.io/projected/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-api-access-wlvkc\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.317873 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.322838 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.328532 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.337215 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.346055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlvkc\" (UniqueName: \"kubernetes.io/projected/a11f831b-4af3-43cd-a0d0-0499b3e5e084-kube-api-access-wlvkc\") pod \"kube-state-metrics-0\" (UID: \"a11f831b-4af3-43cd-a0d0-0499b3e5e084\") " pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.436760 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.437041 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-central-agent" containerID="cri-o://e1ee95a0d04c519ff7873345dfbf3fdac9c292c76ac1ec3969781c3b3b9b854b" gracePeriod=30 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.437522 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="proxy-httpd" containerID="cri-o://1c069ad254a4c8fca1bb8b69b5c8726f0e006ff55992e6b97339b29ae8b8d461" gracePeriod=30 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.437572 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="sg-core" containerID="cri-o://d552eb73b06a36af012935127267f7410428af2dc0d40ef1ef070896a397c38e" gracePeriod=30 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.437624 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-notification-agent" containerID="cri-o://5c0b3ef84a840dd2dae688b570ada88f5bd503184ded4860a9b13443e37eb5d5" gracePeriod=30 Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.486518 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.660440 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.738976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs\") pod \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.739168 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle\") pod \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.739227 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data\") pod \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.739371 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2lwx\" (UniqueName: \"kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx\") pod \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\" (UID: \"58b3f451-f09d-4dce-96ed-01bb0c8e0b66\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.743107 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs" (OuterVolumeSpecName: "logs") pod "58b3f451-f09d-4dce-96ed-01bb0c8e0b66" (UID: "58b3f451-f09d-4dce-96ed-01bb0c8e0b66"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.748213 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx" (OuterVolumeSpecName: "kube-api-access-g2lwx") pod "58b3f451-f09d-4dce-96ed-01bb0c8e0b66" (UID: "58b3f451-f09d-4dce-96ed-01bb0c8e0b66"). InnerVolumeSpecName "kube-api-access-g2lwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.780716 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58b3f451-f09d-4dce-96ed-01bb0c8e0b66" (UID: "58b3f451-f09d-4dce-96ed-01bb0c8e0b66"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.783779 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.784113 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data" (OuterVolumeSpecName: "config-data") pod "58b3f451-f09d-4dce-96ed-01bb0c8e0b66" (UID: "58b3f451-f09d-4dce-96ed-01bb0c8e0b66"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.841183 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle\") pod \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.841250 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh5tm\" (UniqueName: \"kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm\") pod \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.841354 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data\") pod \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\" (UID: \"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8\") " Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.842415 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2lwx\" (UniqueName: \"kubernetes.io/projected/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-kube-api-access-g2lwx\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.842452 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.842466 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.842476 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3f451-f09d-4dce-96ed-01bb0c8e0b66-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.852422 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm" (OuterVolumeSpecName: "kube-api-access-nh5tm") pod "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" (UID: "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8"). InnerVolumeSpecName "kube-api-access-nh5tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.886782 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" (UID: "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.886838 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data" (OuterVolumeSpecName: "config-data") pod "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" (UID: "cb19620a-ec4d-4101-b1e6-b65d6c89d3b8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.945015 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.945052 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh5tm\" (UniqueName: \"kubernetes.io/projected/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-kube-api-access-nh5tm\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:25 crc kubenswrapper[4869]: I0929 14:03:25.945065 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.035757 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.041259 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"58b3f451-f09d-4dce-96ed-01bb0c8e0b66","Type":"ContainerDied","Data":"9656024665d9d0770287ce4d5fbe7f2c430c3dedaf58753dcf580f155e18b28c"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.041308 4869 scope.go:117] "RemoveContainer" containerID="e0370dff21283aec365991175f8bbfe8e4494b744c29f5ef51181dc7c6822fd7" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.041427 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.048501 4869 generic.go:334] "Generic (PLEG): container finished" podID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerID="1c069ad254a4c8fca1bb8b69b5c8726f0e006ff55992e6b97339b29ae8b8d461" exitCode=0 Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.048644 4869 generic.go:334] "Generic (PLEG): container finished" podID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerID="d552eb73b06a36af012935127267f7410428af2dc0d40ef1ef070896a397c38e" exitCode=2 Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.048751 4869 generic.go:334] "Generic (PLEG): container finished" podID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerID="e1ee95a0d04c519ff7873345dfbf3fdac9c292c76ac1ec3969781c3b3b9b854b" exitCode=0 Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.048879 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerDied","Data":"1c069ad254a4c8fca1bb8b69b5c8726f0e006ff55992e6b97339b29ae8b8d461"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.048969 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerDied","Data":"d552eb73b06a36af012935127267f7410428af2dc0d40ef1ef070896a397c38e"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.049030 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerDied","Data":"e1ee95a0d04c519ff7873345dfbf3fdac9c292c76ac1ec3969781c3b3b9b854b"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.055083 4869 generic.go:334] "Generic (PLEG): container finished" podID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" 
containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" exitCode=0 Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.055192 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8","Type":"ContainerDied","Data":"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.055221 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb19620a-ec4d-4101-b1e6-b65d6c89d3b8","Type":"ContainerDied","Data":"ed246e066b606305aa96ffee29e38ef5da7eb62d547049b4c3618bfcee521b54"} Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.055774 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.071844 4869 scope.go:117] "RemoveContainer" containerID="dee9d1ab559bda72352b92cd23cda8112578ed07c9c1f68a85885278a190f8df" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.098518 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.115952 4869 scope.go:117] "RemoveContainer" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.121600 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.133428 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.150143 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: E0929 14:03:26.150867 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" containerName="nova-scheduler-scheduler" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.150960 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" containerName="nova-scheduler-scheduler" Sep 29 14:03:26 crc kubenswrapper[4869]: E0929 14:03:26.151068 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-log" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.151146 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-log" Sep 29 14:03:26 crc kubenswrapper[4869]: E0929 14:03:26.151227 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-api" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.151281 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-api" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.151526 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-api" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.151628 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" containerName="nova-scheduler-scheduler" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.151704 4869 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" containerName="nova-api-log" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.152895 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.156180 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.163419 4869 scope.go:117] "RemoveContainer" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.165320 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: E0929 14:03:26.165465 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55\": container with ID starting with cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55 not found: ID does not exist" containerID="cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.165515 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55"} err="failed to get container status \"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55\": rpc error: code = NotFound desc = could not find container \"cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55\": container with ID starting with cd292211e2a84d5f10e531097135227593e04c93b43c8e176804aa7f582aaf55 not found: ID does not exist" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.172830 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.182060 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.184439 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.189117 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.196363 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.249731 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.249786 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.249826 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knpf5\" (UniqueName: \"kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.249870 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.249896 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.250035 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.250206 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz5k2\" (UniqueName: \"kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.257747 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39fc25b3-44e9-413a-a50b-71655cb60e49" path="/var/lib/kubelet/pods/39fc25b3-44e9-413a-a50b-71655cb60e49/volumes" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.259143 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58b3f451-f09d-4dce-96ed-01bb0c8e0b66" path="/var/lib/kubelet/pods/58b3f451-f09d-4dce-96ed-01bb0c8e0b66/volumes" Sep 29 14:03:26 crc 
kubenswrapper[4869]: I0929 14:03:26.259765 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb19620a-ec4d-4101-b1e6-b65d6c89d3b8" path="/var/lib/kubelet/pods/cb19620a-ec4d-4101-b1e6-b65d6c89d3b8/volumes" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.351957 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352043 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz5k2\" (UniqueName: \"kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352237 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352261 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352300 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knpf5\" (UniqueName: \"kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352343 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.352382 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.353255 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.357051 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.358197 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.358671 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.359491 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.370467 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knpf5\" (UniqueName: \"kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5\") pod \"nova-scheduler-0\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " pod="openstack/nova-scheduler-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.370699 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz5k2\" (UniqueName: \"kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2\") pod \"nova-api-0\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.498515 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:26 crc kubenswrapper[4869]: I0929 14:03:26.511063 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.063102 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:27 crc kubenswrapper[4869]: W0929 14:03:27.064100 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07263f09_050e_4ff4_845c_2fa6f2893dbf.slice/crio-f989676be59b272efb9c5ac3c5338686ee7c4c311227578f7afb230ca98c3423 WatchSource:0}: Error finding container f989676be59b272efb9c5ac3c5338686ee7c4c311227578f7afb230ca98c3423: Status 404 returned error can't find the container with id f989676be59b272efb9c5ac3c5338686ee7c4c311227578f7afb230ca98c3423 Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.084590 4869 generic.go:334] "Generic (PLEG): container finished" podID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerID="5c0b3ef84a840dd2dae688b570ada88f5bd503184ded4860a9b13443e37eb5d5" exitCode=0 Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.084710 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerDied","Data":"5c0b3ef84a840dd2dae688b570ada88f5bd503184ded4860a9b13443e37eb5d5"} Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.095912 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a11f831b-4af3-43cd-a0d0-0499b3e5e084","Type":"ContainerStarted","Data":"96f7ae735f4453fe5b1772ba32a77f60277db88a7a5c6850b135137569d15e65"} Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.096004 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a11f831b-4af3-43cd-a0d0-0499b3e5e084","Type":"ContainerStarted","Data":"525b03cef72a0de845db420c1cc958d8170dca136f6d6c1ecd778c63ac584899"} Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.096824 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.123353 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.753395568 podStartE2EDuration="2.123327495s" podCreationTimestamp="2025-09-29 14:03:25 +0000 UTC" firstStartedPulling="2025-09-29 14:03:26.050075805 +0000 UTC m=+1332.490720125" lastFinishedPulling="2025-09-29 14:03:26.420007732 +0000 UTC m=+1332.860652052" observedRunningTime="2025-09-29 14:03:27.113040458 +0000 UTC m=+1333.553684778" watchObservedRunningTime="2025-09-29 14:03:27.123327495 +0000 UTC m=+1333.563971825" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.158315 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.210650 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.285838 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.286301 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.286706 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.286861 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.286891 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.287492 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twq7k\" (UniqueName: \"kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.287573 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.287643 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts\") pod \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\" (UID: \"5ebf71cf-0dc1-4931-9bd9-c228d74a3683\") " Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.288168 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-run-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.288843 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.299452 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k" (OuterVolumeSpecName: "kube-api-access-twq7k") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "kube-api-access-twq7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.303424 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts" (OuterVolumeSpecName: "scripts") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.358963 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.389998 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.390037 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twq7k\" (UniqueName: \"kubernetes.io/projected/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-kube-api-access-twq7k\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.390048 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-log-httpd\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.390058 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.426515 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.439554 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data" (OuterVolumeSpecName: "config-data") pod "5ebf71cf-0dc1-4931-9bd9-c228d74a3683" (UID: "5ebf71cf-0dc1-4931-9bd9-c228d74a3683"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.492016 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:27 crc kubenswrapper[4869]: I0929 14:03:27.492059 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ebf71cf-0dc1-4931-9bd9-c228d74a3683-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.130913 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a0abade9-67cb-4289-94c3-78acda74b360","Type":"ContainerStarted","Data":"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.131256 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a0abade9-67cb-4289-94c3-78acda74b360","Type":"ContainerStarted","Data":"1f3f1795a7c55e069f672d9621c8835a5ca93c2ac7368fdc4bb0f789e045b46c"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.132589 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerStarted","Data":"fee3e261eb7be71d7308513a7cc85b7813936eebf468064a96d423941a88b347"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.132636 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerStarted","Data":"a652789c225b722a428cba07e153bc86a33d68301d7e40e54bab6f1679b93e6a"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.132649 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerStarted","Data":"f989676be59b272efb9c5ac3c5338686ee7c4c311227578f7afb230ca98c3423"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.137112 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ebf71cf-0dc1-4931-9bd9-c228d74a3683","Type":"ContainerDied","Data":"443670188e48ad3a8b3b0a78287f38dfe1a9f9198aec0d2adc3a66b71a7c5a33"} Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.137166 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.137151 4869 scope.go:117] "RemoveContainer" containerID="1c069ad254a4c8fca1bb8b69b5c8726f0e006ff55992e6b97339b29ae8b8d461" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.154389 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.154366947 podStartE2EDuration="2.154366947s" podCreationTimestamp="2025-09-29 14:03:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:28.145442255 +0000 UTC m=+1334.586086575" watchObservedRunningTime="2025-09-29 14:03:28.154366947 +0000 UTC m=+1334.595011267" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.168231 4869 scope.go:117] "RemoveContainer" containerID="d552eb73b06a36af012935127267f7410428af2dc0d40ef1ef070896a397c38e" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.185415 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.185395754 podStartE2EDuration="2.185395754s" podCreationTimestamp="2025-09-29 14:03:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:28.166970415 +0000 UTC m=+1334.607614735" watchObservedRunningTime="2025-09-29 14:03:28.185395754 +0000 UTC m=+1334.626040074" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.198216 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.211647 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.215212 4869 scope.go:117] "RemoveContainer" containerID="5c0b3ef84a840dd2dae688b570ada88f5bd503184ded4860a9b13443e37eb5d5" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.228717 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:28 crc kubenswrapper[4869]: E0929 14:03:28.229203 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-notification-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229228 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-notification-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: E0929 14:03:28.229243 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="proxy-httpd" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229253 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="proxy-httpd" Sep 29 14:03:28 crc kubenswrapper[4869]: E0929 14:03:28.229284 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-central-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229293 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-central-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: E0929 14:03:28.229320 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" 
containerName="sg-core" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229327 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="sg-core" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229525 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-central-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229540 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="sg-core" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229561 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="ceilometer-notification-agent" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.229572 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" containerName="proxy-httpd" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.231740 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.233765 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.234060 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.234304 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.250298 4869 scope.go:117] "RemoveContainer" containerID="e1ee95a0d04c519ff7873345dfbf3fdac9c292c76ac1ec3969781c3b3b9b854b" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.256299 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ebf71cf-0dc1-4931-9bd9-c228d74a3683" path="/var/lib/kubelet/pods/5ebf71cf-0dc1-4931-9bd9-c228d74a3683/volumes" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.257021 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308706 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308749 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308768 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7297\" (UniqueName: \"kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308791 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308847 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308887 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.308947 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.309053 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.410880 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.410980 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411022 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411040 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7297\" (UniqueName: \"kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411064 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " 
pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411085 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411112 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411146 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.411549 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.412215 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.415600 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.416284 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.416652 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.417699 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.430733 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0" Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.451291 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7297\" (UniqueName: \"kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297\") pod \"ceilometer-0\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " pod="openstack/ceilometer-0"
Sep 29 14:03:28 crc kubenswrapper[4869]: I0929 14:03:28.561902 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Sep 29 14:03:29 crc kubenswrapper[4869]: I0929 14:03:29.038433 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:29 crc kubenswrapper[4869]: I0929 14:03:29.147587 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerStarted","Data":"2a6a6a848c875201f111a39759bbba75d80a0236b347821eb14addef4491b6ce"}
Sep 29 14:03:30 crc kubenswrapper[4869]: I0929 14:03:30.169093 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerStarted","Data":"db849fc9c05c2d80820531e5dff75c7a8036cafc7b659e5a4f9b50d0ea597783"}
Sep 29 14:03:30 crc kubenswrapper[4869]: I0929 14:03:30.169583 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerStarted","Data":"a57a7cd3ac16bc2a07eaf2658fb61b98eacaea0d08dbb5c0bb66aa8002832b65"}
Sep 29 14:03:31 crc kubenswrapper[4869]: I0929 14:03:31.182592 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerStarted","Data":"8261359bd9f4ead2edda41ef34936a0a8da3d2fd754c1e4e7f83286f6712880e"}
Sep 29 14:03:31 crc kubenswrapper[4869]: I0929 14:03:31.512686 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Sep 29 14:03:32 crc kubenswrapper[4869]: I0929 14:03:32.196414 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerStarted","Data":"98d7978396ea6ea76398d328ef1ae00a461ccb86dc2feeb0c42201a8c7a102e3"}
Sep 29 14:03:32 crc kubenswrapper[4869]: I0929 14:03:32.197703 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Sep 29 14:03:32 crc kubenswrapper[4869]: I0929 14:03:32.221989 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.807811129 podStartE2EDuration="4.221970777s" podCreationTimestamp="2025-09-29 14:03:28 +0000 UTC" firstStartedPulling="2025-09-29 14:03:29.047092844 +0000 UTC m=+1335.487737164" lastFinishedPulling="2025-09-29 14:03:31.461252472 +0000 UTC m=+1337.901896812" observedRunningTime="2025-09-29 14:03:32.218591179 +0000 UTC m=+1338.659235499" watchObservedRunningTime="2025-09-29 14:03:32.221970777 +0000 UTC m=+1338.662615097"
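The pod_startup_latency_tracker entry above reports two durations: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (14:03:32.221970777 minus 14:03:28 gives 4.221970777s), and podStartSLOduration is that figure minus the image-pull window, lastFinishedPulling minus firstStartedPulling taken from the monotonic m=+ offsets. The kube-state-metrics-0 entry earlier checks out the same way. A small Go verification of the arithmetic using the values copied from this entry (the field names come from the log; the subtraction is our reading of them):

    package main

    import "fmt"

    func main() {
        // Monotonic offsets (m=+...) copied from the ceilometer-0 entry above.
        firstStartedPulling := 1335.487737164
        lastFinishedPulling := 1337.901896812
        podStartE2E := 4.221970777 // watchObservedRunningTime - podCreationTimestamp

        pull := lastFinishedPulling - firstStartedPulling // 2.414159648s spent pulling images
        slo := podStartE2E - pull                         // startup time excluding image pulls

        fmt.Printf("podStartSLOduration=%.9f\n", slo) // prints 1.807811129, matching the log
    }

The nova-scheduler-0 and nova-api-0 entries earlier show the degenerate case: both pull timestamps are the zero time 0001-01-01, no pull happened, and SLO equals E2E.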
pod="openstack/nova-api-0" Sep 29 14:03:36 crc kubenswrapper[4869]: I0929 14:03:36.499953 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Sep 29 14:03:36 crc kubenswrapper[4869]: I0929 14:03:36.512535 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Sep 29 14:03:36 crc kubenswrapper[4869]: I0929 14:03:36.542940 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Sep 29 14:03:37 crc kubenswrapper[4869]: I0929 14:03:37.282546 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Sep 29 14:03:37 crc kubenswrapper[4869]: I0929 14:03:37.540790 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.200:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:03:37 crc kubenswrapper[4869]: I0929 14:03:37.581802 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.200:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.315622 4869 generic.go:334] "Generic (PLEG): container finished" podID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" containerID="5a9ab96f48ed94ad601cb05442902b25006ce1b9d67c996493c7cc5a97e7ec5d" exitCode=137 Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.315779 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185","Type":"ContainerDied","Data":"5a9ab96f48ed94ad601cb05442902b25006ce1b9d67c996493c7cc5a97e7ec5d"} Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.318432 4869 generic.go:334] "Generic (PLEG): container finished" podID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerID="f9e467293f90f7a8bf950df6f03ce7a89ee70b8858330a15f3e666a69f78e93d" exitCode=137 Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.318475 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerDied","Data":"f9e467293f90f7a8bf950df6f03ce7a89ee70b8858330a15f3e666a69f78e93d"} Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.318504 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"781e294b-35ba-43e1-ab5b-dfa72224bf72","Type":"ContainerDied","Data":"9c21c51dd4aaca6eb0e400f0cd42ef51e11a88e775befa96508b4d1441daa117"} Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.318519 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c21c51dd4aaca6eb0e400f0cd42ef51e11a88e775befa96508b4d1441daa117" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.396218 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.402272 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.466474 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle\") pod \"781e294b-35ba-43e1-ab5b-dfa72224bf72\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.466665 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs\") pod \"781e294b-35ba-43e1-ab5b-dfa72224bf72\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.466804 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data\") pod \"781e294b-35ba-43e1-ab5b-dfa72224bf72\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.466851 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbvzr\" (UniqueName: \"kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr\") pod \"781e294b-35ba-43e1-ab5b-dfa72224bf72\" (UID: \"781e294b-35ba-43e1-ab5b-dfa72224bf72\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.468507 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs" (OuterVolumeSpecName: "logs") pod "781e294b-35ba-43e1-ab5b-dfa72224bf72" (UID: "781e294b-35ba-43e1-ab5b-dfa72224bf72"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.473444 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr" (OuterVolumeSpecName: "kube-api-access-mbvzr") pod "781e294b-35ba-43e1-ab5b-dfa72224bf72" (UID: "781e294b-35ba-43e1-ab5b-dfa72224bf72"). InnerVolumeSpecName "kube-api-access-mbvzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.501006 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "781e294b-35ba-43e1-ab5b-dfa72224bf72" (UID: "781e294b-35ba-43e1-ab5b-dfa72224bf72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.501550 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data" (OuterVolumeSpecName: "config-data") pod "781e294b-35ba-43e1-ab5b-dfa72224bf72" (UID: "781e294b-35ba-43e1-ab5b-dfa72224bf72"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.569339 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rhhm\" (UniqueName: \"kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm\") pod \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.569521 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data\") pod \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.569776 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle\") pod \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\" (UID: \"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185\") " Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.570681 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.570725 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbvzr\" (UniqueName: \"kubernetes.io/projected/781e294b-35ba-43e1-ab5b-dfa72224bf72-kube-api-access-mbvzr\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.570753 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/781e294b-35ba-43e1-ab5b-dfa72224bf72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.570779 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/781e294b-35ba-43e1-ab5b-dfa72224bf72-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.575484 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm" (OuterVolumeSpecName: "kube-api-access-4rhhm") pod "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" (UID: "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185"). InnerVolumeSpecName "kube-api-access-4rhhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.593348 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data" (OuterVolumeSpecName: "config-data") pod "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" (UID: "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.607977 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" (UID: "ef0d0ecb-ed8f-407e-9fa6-4f74443f3185"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.672935 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.672984 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rhhm\" (UniqueName: \"kubernetes.io/projected/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-kube-api-access-4rhhm\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:44 crc kubenswrapper[4869]: I0929 14:03:44.673003 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.330731 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.330729 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef0d0ecb-ed8f-407e-9fa6-4f74443f3185","Type":"ContainerDied","Data":"2629e6b60283fe535dfe715a667b6f9b0d7e9f6aed0f2211e5100eceda682cd3"} Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.331183 4869 scope.go:117] "RemoveContainer" containerID="5a9ab96f48ed94ad601cb05442902b25006ce1b9d67c996493c7cc5a97e7ec5d" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.330783 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.395728 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.402925 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.415022 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.428085 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.437209 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Sep 29 14:03:45 crc kubenswrapper[4869]: E0929 14:03:45.438773 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-log" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.438794 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-log" Sep 29 14:03:45 crc kubenswrapper[4869]: E0929 14:03:45.438807 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-metadata" Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.438815 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-metadata" Sep 29 14:03:45 crc kubenswrapper[4869]: E0929 14:03:45.438830 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" containerName="nova-cell1-novncproxy-novncproxy" Sep 29 14:03:45 crc 
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.438836 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" containerName="nova-cell1-novncproxy-novncproxy"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.439032 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" containerName="nova-cell1-novncproxy-novncproxy"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.439050 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-log"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.439066 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" containerName="nova-metadata-metadata"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.439702 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.442927 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.443085 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.443081 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.445381 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.467486 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.478074 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.480928 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.484005 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.505146 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.597570 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.597789 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h2cc\" (UniqueName: \"kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.597856 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.597903 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598086 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598207 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68lfh\" (UniqueName: \"kubernetes.io/projected/5da9a418-1287-499a-91b7-a2bbed6a18ef-kube-api-access-68lfh\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598271 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598309 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598349 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.598419 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700392 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68lfh\" (UniqueName: \"kubernetes.io/projected/5da9a418-1287-499a-91b7-a2bbed6a18ef-kube-api-access-68lfh\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700788 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700816 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700850 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700885 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700950 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700973 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h2cc\" (UniqueName: \"kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.700992 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.701012 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.701040 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.701583 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.715485 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.715486 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.715486 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.715628 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.716057 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5da9a418-1287-499a-91b7-a2bbed6a18ef-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.716440 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.718202 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.728053 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h2cc\" (UniqueName: \"kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc\") pod \"nova-metadata-0\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " pod="openstack/nova-metadata-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.734177 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68lfh\" (UniqueName: \"kubernetes.io/projected/5da9a418-1287-499a-91b7-a2bbed6a18ef-kube-api-access-68lfh\") pod \"nova-cell1-novncproxy-0\" (UID: \"5da9a418-1287-499a-91b7-a2bbed6a18ef\") " pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.780703 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:45 crc kubenswrapper[4869]: I0929 14:03:45.810429 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.259536 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="781e294b-35ba-43e1-ab5b-dfa72224bf72" path="/var/lib/kubelet/pods/781e294b-35ba-43e1-ab5b-dfa72224bf72/volumes"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.261124 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef0d0ecb-ed8f-407e-9fa6-4f74443f3185" path="/var/lib/kubelet/pods/ef0d0ecb-ed8f-407e-9fa6-4f74443f3185/volumes"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.278619 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Sep 29 14:03:46 crc kubenswrapper[4869]: W0929 14:03:46.283543 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5da9a418_1287_499a_91b7_a2bbed6a18ef.slice/crio-31b5b8a70208b4a8aa732104d022421d77debc4d9806082437ff6f8ed6190e2e WatchSource:0}: Error finding container 31b5b8a70208b4a8aa732104d022421d77debc4d9806082437ff6f8ed6190e2e: Status 404 returned error can't find the container with id 31b5b8a70208b4a8aa732104d022421d77debc4d9806082437ff6f8ed6190e2e
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.345395 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5da9a418-1287-499a-91b7-a2bbed6a18ef","Type":"ContainerStarted","Data":"31b5b8a70208b4a8aa732104d022421d77debc4d9806082437ff6f8ed6190e2e"}
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.371415 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Sep 29 14:03:46 crc kubenswrapper[4869]: W0929 14:03:46.388492 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff1a02ba_2862_4062_8cb8_93c2904b7278.slice/crio-4aeabb3a5b867ac473f2dc7baa301e6ee910aeb3881ac1855917a956b48dc557 WatchSource:0}: Error finding container 4aeabb3a5b867ac473f2dc7baa301e6ee910aeb3881ac1855917a956b48dc557: Status 404 returned error can't find the container with id 4aeabb3a5b867ac473f2dc7baa301e6ee910aeb3881ac1855917a956b48dc557
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.506190 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.507688 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.513098 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Sep 29 14:03:46 crc kubenswrapper[4869]: I0929 14:03:46.518919 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.372309 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5da9a418-1287-499a-91b7-a2bbed6a18ef","Type":"ContainerStarted","Data":"50a06baa4a2999010381f3b5560d9ad9ff6d8deceb7b95f902ceb4d14ef1fa84"}
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.375091 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerStarted","Data":"f9aba5df0ce5305c558e1f5ef72fce5e13ea3f008f18888527eea5c4b2fa2c31"}
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.375125 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerStarted","Data":"e3906a0f79e0170154173258bec818e8d1d2eeaf724cf78c7cfeaec8afcd4346"}
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.375139 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerStarted","Data":"4aeabb3a5b867ac473f2dc7baa301e6ee910aeb3881ac1855917a956b48dc557"}
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.375518 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.386012 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.406052 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.406002826 podStartE2EDuration="2.406002826s" podCreationTimestamp="2025-09-29 14:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:47.396809527 +0000 UTC m=+1353.837453857" watchObservedRunningTime="2025-09-29 14:03:47.406002826 +0000 UTC m=+1353.846647146"
Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.456335 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.4563164840000002 podStartE2EDuration="2.456316484s" podCreationTimestamp="2025-09-29 14:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:47.455911033 +0000 UTC m=+1353.896555353" watchObservedRunningTime="2025-09-29 14:03:47.456316484 +0000 UTC m=+1353.896960794"
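The two tracker lines above expose the arithmetic directly: with no image pull (firstStartedPulling and lastFinishedPulling are the zero time), the reported podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp. A small Go sketch reproducing the nova-cell1-novncproxy-0 figure from the quoted timestamps; this is an illustration of the bookkeeping, not the tracker's implementation:

// slo_duration.go: recompute podStartSLOduration from the timestamps quoted
// in the tracker line above (values copied verbatim from the log).
package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching Go's default time.String() form used in the log.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, err := time.Parse(layout, "2025-09-29 14:03:45 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2025-09-29 14:03:47.406002826 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(running.Sub(created)) // 2.406002826s, matching podStartSLOduration
}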
00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:47.455911033 +0000 UTC m=+1353.896555353" watchObservedRunningTime="2025-09-29 14:03:47.456316484 +0000 UTC m=+1353.896960794" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.618539 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.620993 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.685217 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.741182 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.741238 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.741261 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.741347 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmkdh\" (UniqueName: \"kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.741389 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.843161 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.843224 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.843279 4869 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.843385 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmkdh\" (UniqueName: \"kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.843454 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.844262 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.844280 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.844310 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.844335 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.869147 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmkdh\" (UniqueName: \"kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh\") pod \"dnsmasq-dns-585f5c457c-pn6z2\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:47 crc kubenswrapper[4869]: I0929 14:03:47.972659 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:48 crc kubenswrapper[4869]: I0929 14:03:48.511067 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.372733 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.373694 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-central-agent" containerID="cri-o://a57a7cd3ac16bc2a07eaf2658fb61b98eacaea0d08dbb5c0bb66aa8002832b65" gracePeriod=30 Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.373856 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="proxy-httpd" containerID="cri-o://98d7978396ea6ea76398d328ef1ae00a461ccb86dc2feeb0c42201a8c7a102e3" gracePeriod=30 Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.373931 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="sg-core" containerID="cri-o://8261359bd9f4ead2edda41ef34936a0a8da3d2fd754c1e4e7f83286f6712880e" gracePeriod=30 Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.373969 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-notification-agent" containerID="cri-o://db849fc9c05c2d80820531e5dff75c7a8036cafc7b659e5a4f9b50d0ea597783" gracePeriod=30 Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.382195 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.202:3000/\": EOF" Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.400246 4869 generic.go:334] "Generic (PLEG): container finished" podID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerID="c9e51ccc006bcffa164301b1d8f074fae69a671c201a2308803e2d64cb2eb308" exitCode=0 Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.400359 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" event={"ID":"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1","Type":"ContainerDied","Data":"c9e51ccc006bcffa164301b1d8f074fae69a671c201a2308803e2d64cb2eb308"} Sep 29 14:03:49 crc kubenswrapper[4869]: I0929 14:03:49.400428 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" event={"ID":"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1","Type":"ContainerStarted","Data":"5778eea00cd61c949971cb3655e441bf8d4a129f84b825a80a747c93a3f8ae5c"} Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414352 4869 generic.go:334] "Generic (PLEG): container finished" podID="036d500b-0aad-41e4-8272-7b0632f02c69" containerID="98d7978396ea6ea76398d328ef1ae00a461ccb86dc2feeb0c42201a8c7a102e3" exitCode=0 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414735 4869 generic.go:334] "Generic (PLEG): container finished" podID="036d500b-0aad-41e4-8272-7b0632f02c69" containerID="8261359bd9f4ead2edda41ef34936a0a8da3d2fd754c1e4e7f83286f6712880e" exitCode=2 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414745 4869 generic.go:334] 
"Generic (PLEG): container finished" podID="036d500b-0aad-41e4-8272-7b0632f02c69" containerID="a57a7cd3ac16bc2a07eaf2658fb61b98eacaea0d08dbb5c0bb66aa8002832b65" exitCode=0 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414450 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerDied","Data":"98d7978396ea6ea76398d328ef1ae00a461ccb86dc2feeb0c42201a8c7a102e3"} Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414812 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerDied","Data":"8261359bd9f4ead2edda41ef34936a0a8da3d2fd754c1e4e7f83286f6712880e"} Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.414829 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerDied","Data":"a57a7cd3ac16bc2a07eaf2658fb61b98eacaea0d08dbb5c0bb66aa8002832b65"} Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.417174 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" event={"ID":"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1","Type":"ContainerStarted","Data":"491b0dfa6bb97ec839de67306bdf16afc8a5fa9ab609437e137d20f99f7961f9"} Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.417298 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.445249 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" podStartSLOduration=3.4452299330000002 podStartE2EDuration="3.445229933s" podCreationTimestamp="2025-09-29 14:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:50.442530142 +0000 UTC m=+1356.883174462" watchObservedRunningTime="2025-09-29 14:03:50.445229933 +0000 UTC m=+1356.885874263" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.592590 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.592811 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-log" containerID="cri-o://a652789c225b722a428cba07e153bc86a33d68301d7e40e54bab6f1679b93e6a" gracePeriod=30 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.592923 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-api" containerID="cri-o://fee3e261eb7be71d7308513a7cc85b7813936eebf468064a96d423941a88b347" gracePeriod=30 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.657561 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.657936 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.658009 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.658807 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.658872 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413" gracePeriod=600 Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.781306 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.818557 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:03:50 crc kubenswrapper[4869]: I0929 14:03:50.818627 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.432065 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413" exitCode=0 Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.432148 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413"} Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.433478 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"} Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.433583 4869 scope.go:117] "RemoveContainer" containerID="67f7d50b20b583fdfc73b613bb7e9647bd7a3b5d2b7aab39171da5a668956c60" Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.441927 4869 generic.go:334] "Generic (PLEG): container finished" podID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerID="a652789c225b722a428cba07e153bc86a33d68301d7e40e54bab6f1679b93e6a" exitCode=143 Sep 29 14:03:51 crc kubenswrapper[4869]: I0929 14:03:51.442067 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerDied","Data":"a652789c225b722a428cba07e153bc86a33d68301d7e40e54bab6f1679b93e6a"} Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.462018 4869 generic.go:334] "Generic (PLEG): container finished" podID="07263f09-050e-4ff4-845c-2fa6f2893dbf" 
containerID="fee3e261eb7be71d7308513a7cc85b7813936eebf468064a96d423941a88b347" exitCode=0 Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.462242 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerDied","Data":"fee3e261eb7be71d7308513a7cc85b7813936eebf468064a96d423941a88b347"} Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.700457 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.754929 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle\") pod \"07263f09-050e-4ff4-845c-2fa6f2893dbf\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.755381 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs\") pod \"07263f09-050e-4ff4-845c-2fa6f2893dbf\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.755498 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data\") pod \"07263f09-050e-4ff4-845c-2fa6f2893dbf\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.755527 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz5k2\" (UniqueName: \"kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2\") pod \"07263f09-050e-4ff4-845c-2fa6f2893dbf\" (UID: \"07263f09-050e-4ff4-845c-2fa6f2893dbf\") " Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.756044 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs" (OuterVolumeSpecName: "logs") pod "07263f09-050e-4ff4-845c-2fa6f2893dbf" (UID: "07263f09-050e-4ff4-845c-2fa6f2893dbf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.756420 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07263f09-050e-4ff4-845c-2fa6f2893dbf-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.760568 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2" (OuterVolumeSpecName: "kube-api-access-xz5k2") pod "07263f09-050e-4ff4-845c-2fa6f2893dbf" (UID: "07263f09-050e-4ff4-845c-2fa6f2893dbf"). InnerVolumeSpecName "kube-api-access-xz5k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.794843 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data" (OuterVolumeSpecName: "config-data") pod "07263f09-050e-4ff4-845c-2fa6f2893dbf" (UID: "07263f09-050e-4ff4-845c-2fa6f2893dbf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.803969 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07263f09-050e-4ff4-845c-2fa6f2893dbf" (UID: "07263f09-050e-4ff4-845c-2fa6f2893dbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.858380 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz5k2\" (UniqueName: \"kubernetes.io/projected/07263f09-050e-4ff4-845c-2fa6f2893dbf-kube-api-access-xz5k2\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.858415 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:52 crc kubenswrapper[4869]: I0929 14:03:52.858425 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07263f09-050e-4ff4-845c-2fa6f2893dbf-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.472457 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07263f09-050e-4ff4-845c-2fa6f2893dbf","Type":"ContainerDied","Data":"f989676be59b272efb9c5ac3c5338686ee7c4c311227578f7afb230ca98c3423"} Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.472510 4869 scope.go:117] "RemoveContainer" containerID="fee3e261eb7be71d7308513a7cc85b7813936eebf468064a96d423941a88b347" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.472536 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.498801 4869 scope.go:117] "RemoveContainer" containerID="a652789c225b722a428cba07e153bc86a33d68301d7e40e54bab6f1679b93e6a" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.507097 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.529240 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.540368 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:53 crc kubenswrapper[4869]: E0929 14:03:53.540813 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-api" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.540833 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-api" Sep 29 14:03:53 crc kubenswrapper[4869]: E0929 14:03:53.540844 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-log" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.540851 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-log" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.541038 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-api" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.541052 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" containerName="nova-api-log" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.542384 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.545892 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.546063 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.546439 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.564875 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.671899 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.671947 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9xrb\" (UniqueName: \"kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.671996 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.672042 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.672065 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.672103 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773620 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773669 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9xrb\" (UniqueName: \"kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb\") pod 
\"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773721 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773769 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773794 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.773831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.774248 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.779650 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.780028 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.780309 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.782095 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.792778 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9xrb\" (UniqueName: \"kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb\") pod \"nova-api-0\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") " pod="openstack/nova-api-0" Sep 
29 14:03:53 crc kubenswrapper[4869]: I0929 14:03:53.865439 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.252607 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07263f09-050e-4ff4-845c-2fa6f2893dbf" path="/var/lib/kubelet/pods/07263f09-050e-4ff4-845c-2fa6f2893dbf/volumes" Sep 29 14:03:54 crc kubenswrapper[4869]: W0929 14:03:54.303554 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1e21cae_762f_4dd0_812f_a82f49c208b1.slice/crio-e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23 WatchSource:0}: Error finding container e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23: Status 404 returned error can't find the container with id e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23 Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.304843 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.502371 4869 generic.go:334] "Generic (PLEG): container finished" podID="036d500b-0aad-41e4-8272-7b0632f02c69" containerID="db849fc9c05c2d80820531e5dff75c7a8036cafc7b659e5a4f9b50d0ea597783" exitCode=0 Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.502806 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerDied","Data":"db849fc9c05c2d80820531e5dff75c7a8036cafc7b659e5a4f9b50d0ea597783"} Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.504186 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerStarted","Data":"7cf09f77c3676376915d5fd1e54227c0c7a8297640d9eafe683bf1f26e1f8cb4"} Sep 29 14:03:54 crc kubenswrapper[4869]: I0929 14:03:54.504245 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerStarted","Data":"e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23"} Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.027394 4869 util.go:48] "No ready sandbox for pod can be found. 
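The orphan cleanup above removes /var/lib/kubelet/pods/<podUID>/volumes once every volume for a deleted pod has been unmounted and detached. A small Go sketch that lists what remains under that layout on a node; this is illustrative (it requires root on the node and simply reflects the directory structure visible in the paths logged here):

// pod_volume_dirs.go: enumerate /var/lib/kubelet/pods/<uid>/volumes, the
// layout the orphan-cleanup lines above operate on. Illustrative sketch.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const root = "/var/lib/kubelet/pods"
	pods, err := os.ReadDir(root)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, p := range pods {
		vols, err := os.ReadDir(filepath.Join(root, p.Name(), "volumes"))
		if err != nil {
			continue // volumes dir already cleaned up for this pod UID
		}
		// Each entry is a plugin dir such as kubernetes.io~secret or
		// kubernetes.io~empty-dir, matching the UniqueNames in the log.
		fmt.Printf("%s: %d volume plugin dir(s)\n", p.Name(), len(vols))
	}
}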
Need to start a new one" pod="openstack/ceilometer-0" Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.134723 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.134808 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.134835 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.134878 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.135002 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.135048 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.135087 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7297\" (UniqueName: \"kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.135108 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml\") pod \"036d500b-0aad-41e4-8272-7b0632f02c69\" (UID: \"036d500b-0aad-41e4-8272-7b0632f02c69\") " Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.139697 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "log-httpd". 
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.139850 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.148983 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts" (OuterVolumeSpecName: "scripts") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.149643 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297" (OuterVolumeSpecName: "kube-api-access-s7297") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "kube-api-access-s7297". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.182099 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.189419 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.237855 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243140 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-scripts\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243419 4869 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-run-httpd\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243577 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7297\" (UniqueName: \"kubernetes.io/projected/036d500b-0aad-41e4-8272-7b0632f02c69-kube-api-access-s7297\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243596 4869 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243629 4869 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/036d500b-0aad-41e4-8272-7b0632f02c69-log-httpd\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243640 4869 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.243649 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.256374 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data" (OuterVolumeSpecName: "config-data") pod "036d500b-0aad-41e4-8272-7b0632f02c69" (UID: "036d500b-0aad-41e4-8272-7b0632f02c69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.345787 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d500b-0aad-41e4-8272-7b0632f02c69-config-data\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.518425 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"036d500b-0aad-41e4-8272-7b0632f02c69","Type":"ContainerDied","Data":"2a6a6a848c875201f111a39759bbba75d80a0236b347821eb14addef4491b6ce"}
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.518476 4869 scope.go:117] "RemoveContainer" containerID="98d7978396ea6ea76398d328ef1ae00a461ccb86dc2feeb0c42201a8c7a102e3"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.518488 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.520801 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerStarted","Data":"cf0642da47c5e2e1dd99bb8506e41f5e6c985150e2038a1fe585d4dc487d4f22"}
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.539806 4869 scope.go:117] "RemoveContainer" containerID="8261359bd9f4ead2edda41ef34936a0a8da3d2fd754c1e4e7f83286f6712880e"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.545144 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.545128279 podStartE2EDuration="2.545128279s" podCreationTimestamp="2025-09-29 14:03:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:55.541528676 +0000 UTC m=+1361.982173026" watchObservedRunningTime="2025-09-29 14:03:55.545128279 +0000 UTC m=+1361.985772589"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.566673 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.568394 4869 scope.go:117] "RemoveContainer" containerID="db849fc9c05c2d80820531e5dff75c7a8036cafc7b659e5a4f9b50d0ea597783"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.571409 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.587246 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:55 crc kubenswrapper[4869]: E0929 14:03:55.588064 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-notification-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.588147 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-notification-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: E0929 14:03:55.588229 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-central-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.588330 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-central-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: E0929 14:03:55.588449 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="sg-core"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.588527 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="sg-core"
Sep 29 14:03:55 crc kubenswrapper[4869]: E0929 14:03:55.588627 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="proxy-httpd"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.588703 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="proxy-httpd"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.589157 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-central-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.589284 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="ceilometer-notification-agent"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.589439 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="sg-core"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.589573 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" containerName="proxy-httpd"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.592894 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.595974 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.596870 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.597090 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.597120 4869 scope.go:117] "RemoveContainer" containerID="a57a7cd3ac16bc2a07eaf2658fb61b98eacaea0d08dbb5c0bb66aa8002832b65"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.613871 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.753556 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-scripts\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.753591 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.753633 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks7ks\" (UniqueName: \"kubernetes.io/projected/a5bafc2b-0923-47c0-845b-6470702822ec-kube-api-access-ks7ks\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.754285 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.754348 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.754378 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-run-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.754541 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-log-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.754588 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-config-data\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.781841 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.805562 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.811782 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.812047 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.855819 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks7ks\" (UniqueName: \"kubernetes.io/projected/a5bafc2b-0923-47c0-845b-6470702822ec-kube-api-access-ks7ks\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.855985 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856005 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856021 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-run-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856063 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-log-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856079 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-config-data\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856127 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-scripts\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.856143 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.857865 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-log-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.858222 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5bafc2b-0923-47c0-845b-6470702822ec-run-httpd\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.864152 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.865060 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.877113 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-config-data\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.877667 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-scripts\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.878178 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks7ks\" (UniqueName: \"kubernetes.io/projected/a5bafc2b-0923-47c0-845b-6470702822ec-kube-api-access-ks7ks\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.878174 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5bafc2b-0923-47c0-845b-6470702822ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5bafc2b-0923-47c0-845b-6470702822ec\") " pod="openstack/ceilometer-0"
Sep 29 14:03:55 crc kubenswrapper[4869]: I0929 14:03:55.917555 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.253166 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="036d500b-0aad-41e4-8272-7b0632f02c69" path="/var/lib/kubelet/pods/036d500b-0aad-41e4-8272-7b0632f02c69/volumes"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.372993 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.545058 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5bafc2b-0923-47c0-845b-6470702822ec","Type":"ContainerStarted","Data":"e27fc4dd159b05fa354ff1c5ed0255538d71a280d55c382419201d241b79eaf5"}
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.570050 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.772722 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-dsc87"]
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.774633 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.777584 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.779876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.784469 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dsc87"]
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.830543 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.830544 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.878620 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.878794 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ffcq\" (UniqueName: \"kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.878875 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.878943 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.980125 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.980179 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.980253 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.980309 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ffcq\" (UniqueName: \"kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:56 crc kubenswrapper[4869]: I0929 14:03:56.985428 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.001332 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.002003 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ffcq\" (UniqueName: \"kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.021102 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts\") pod \"nova-cell1-cell-mapping-dsc87\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") " pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.199287 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.634523 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5bafc2b-0923-47c0-845b-6470702822ec","Type":"ContainerStarted","Data":"31c8f296857aba9e5810fd40a973a8ce58e6c36854a331cb9dec40e598b5fa46"}
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.634973 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5bafc2b-0923-47c0-845b-6470702822ec","Type":"ContainerStarted","Data":"5897c696b79c8c96956f40821dda565e964ce3fbea123e74a51c1cbb9f3ec14a"}
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.655740 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dsc87"]
Sep 29 14:03:57 crc kubenswrapper[4869]: I0929 14:03:57.975083 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2"
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.034620 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"]
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.034854 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="dnsmasq-dns" containerID="cri-o://20bb7d3bc8e87e2f7be0f31ddc6fe5c244a1b0fe7bda35abbdf4710bd3dd7260" gracePeriod=10
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.650438 4869 generic.go:334] "Generic (PLEG): container finished" podID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerID="20bb7d3bc8e87e2f7be0f31ddc6fe5c244a1b0fe7bda35abbdf4710bd3dd7260" exitCode=0
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.650635 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerDied","Data":"20bb7d3bc8e87e2f7be0f31ddc6fe5c244a1b0fe7bda35abbdf4710bd3dd7260"}
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.650838 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg" event={"ID":"4c5be2a8-3d7f-4360-85dc-dc3535017311","Type":"ContainerDied","Data":"74f357227b16e3d639653286005deff35af2de05656501d7d608d17b2eedf5d2"}
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.650857 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74f357227b16e3d639653286005deff35af2de05656501d7d608d17b2eedf5d2"
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.661264 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5bafc2b-0923-47c0-845b-6470702822ec","Type":"ContainerStarted","Data":"44fdcbc972190f07a0c9ef7cc4c30f6b4e855e78d525cb6cb71d82d9d07cc6d7"}
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.663685 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dsc87" event={"ID":"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae","Type":"ContainerStarted","Data":"54ad67e4a9f24eaa93d94751756b25ff2040da3c356af741a4674910cdd067ea"}
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.663715 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dsc87" event={"ID":"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae","Type":"ContainerStarted","Data":"0e4f6cbd97eda49dcffd785bb7b97e7aca7225c477a879d3bf2bf68cc88399eb"}
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.712048 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg"
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.737815 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-dsc87" podStartSLOduration=2.7377973410000003 podStartE2EDuration="2.737797341s" podCreationTimestamp="2025-09-29 14:03:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:03:58.686864775 +0000 UTC m=+1365.127509095" watchObservedRunningTime="2025-09-29 14:03:58.737797341 +0000 UTC m=+1365.178441661"
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.821258 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb\") pod \"4c5be2a8-3d7f-4360-85dc-dc3535017311\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") "
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.821367 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x9r2\" (UniqueName: \"kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2\") pod \"4c5be2a8-3d7f-4360-85dc-dc3535017311\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") "
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.821585 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb\") pod \"4c5be2a8-3d7f-4360-85dc-dc3535017311\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") "
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.822198 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config\") pod \"4c5be2a8-3d7f-4360-85dc-dc3535017311\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") "
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.822232 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc\") pod \"4c5be2a8-3d7f-4360-85dc-dc3535017311\" (UID: \"4c5be2a8-3d7f-4360-85dc-dc3535017311\") "
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.826337 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2" (OuterVolumeSpecName: "kube-api-access-4x9r2") pod "4c5be2a8-3d7f-4360-85dc-dc3535017311" (UID: "4c5be2a8-3d7f-4360-85dc-dc3535017311"). InnerVolumeSpecName "kube-api-access-4x9r2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.891585 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config" (OuterVolumeSpecName: "config") pod "4c5be2a8-3d7f-4360-85dc-dc3535017311" (UID: "4c5be2a8-3d7f-4360-85dc-dc3535017311"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.899830 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4c5be2a8-3d7f-4360-85dc-dc3535017311" (UID: "4c5be2a8-3d7f-4360-85dc-dc3535017311"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.925095 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x9r2\" (UniqueName: \"kubernetes.io/projected/4c5be2a8-3d7f-4360-85dc-dc3535017311-kube-api-access-4x9r2\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.925138 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.925152 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-config\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.943312 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4c5be2a8-3d7f-4360-85dc-dc3535017311" (UID: "4c5be2a8-3d7f-4360-85dc-dc3535017311"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 14:03:58 crc kubenswrapper[4869]: I0929 14:03:58.949491 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4c5be2a8-3d7f-4360-85dc-dc3535017311" (UID: "4c5be2a8-3d7f-4360-85dc-dc3535017311"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.027304 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-dns-svc\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.027513 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c5be2a8-3d7f-4360-85dc-dc3535017311-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.675596 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5bafc2b-0923-47c0-845b-6470702822ec","Type":"ContainerStarted","Data":"21d75f3b5dd404cb83cb4b6d3efd14e4db13e6adf8db97c8148e75e9a95d471a"}
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.676032 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.675652 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d758d5cd9-c8rjg"
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.704418 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8478224330000002 podStartE2EDuration="4.704393985s" podCreationTimestamp="2025-09-29 14:03:55 +0000 UTC" firstStartedPulling="2025-09-29 14:03:56.381801898 +0000 UTC m=+1362.822446218" lastFinishedPulling="2025-09-29 14:03:59.23837346 +0000 UTC m=+1365.679017770" observedRunningTime="2025-09-29 14:03:59.701452639 +0000 UTC m=+1366.142096959" watchObservedRunningTime="2025-09-29 14:03:59.704393985 +0000 UTC m=+1366.145038305"
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.726163 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"]
Sep 29 14:03:59 crc kubenswrapper[4869]: I0929 14:03:59.734142 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d758d5cd9-c8rjg"]
Sep 29 14:04:00 crc kubenswrapper[4869]: I0929 14:04:00.259042 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" path="/var/lib/kubelet/pods/4c5be2a8-3d7f-4360-85dc-dc3535017311/volumes"
Sep 29 14:04:03 crc kubenswrapper[4869]: I0929 14:04:03.749863 4869 generic.go:334] "Generic (PLEG): container finished" podID="cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" containerID="54ad67e4a9f24eaa93d94751756b25ff2040da3c356af741a4674910cdd067ea" exitCode=0
Sep 29 14:04:03 crc kubenswrapper[4869]: I0929 14:04:03.749942 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dsc87" event={"ID":"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae","Type":"ContainerDied","Data":"54ad67e4a9f24eaa93d94751756b25ff2040da3c356af741a4674910cdd067ea"}
Sep 29 14:04:03 crc kubenswrapper[4869]: I0929 14:04:03.866673 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Sep 29 14:04:03 crc kubenswrapper[4869]: I0929 14:04:03.866734 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Sep 29 14:04:04 crc kubenswrapper[4869]: I0929 14:04:04.878804 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Sep 29 14:04:04 crc kubenswrapper[4869]: I0929 14:04:04.878838 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.267826 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.383281 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts\") pod \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") "
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.383327 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ffcq\" (UniqueName: \"kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq\") pod \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") "
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.383365 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data\") pod \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") "
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.383485 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle\") pod \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\" (UID: \"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae\") "
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.390457 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts" (OuterVolumeSpecName: "scripts") pod "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" (UID: "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.395998 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq" (OuterVolumeSpecName: "kube-api-access-8ffcq") pod "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" (UID: "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae"). InnerVolumeSpecName "kube-api-access-8ffcq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.415886 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data" (OuterVolumeSpecName: "config-data") pod "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" (UID: "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.419814 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" (UID: "cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.487241 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.487278 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-scripts\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.487290 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ffcq\" (UniqueName: \"kubernetes.io/projected/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-kube-api-access-8ffcq\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.487300 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae-config-data\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.768138 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dsc87" event={"ID":"cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae","Type":"ContainerDied","Data":"0e4f6cbd97eda49dcffd785bb7b97e7aca7225c477a879d3bf2bf68cc88399eb"}
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.768181 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e4f6cbd97eda49dcffd785bb7b97e7aca7225c477a879d3bf2bf68cc88399eb"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.768180 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dsc87"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.816223 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.820562 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Sep 29 14:04:05 crc kubenswrapper[4869]: I0929 14:04:05.823417 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.063005 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.063275 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-log" containerID="cri-o://7cf09f77c3676376915d5fd1e54227c0c7a8297640d9eafe683bf1f26e1f8cb4" gracePeriod=30
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.063355 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-api" containerID="cri-o://cf0642da47c5e2e1dd99bb8506e41f5e6c985150e2038a1fe585d4dc487d4f22" gracePeriod=30
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.077051 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.077304 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a0abade9-67cb-4289-94c3-78acda74b360" containerName="nova-scheduler-scheduler" containerID="cri-o://431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" gracePeriod=30
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.097215 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Sep 29 14:04:06 crc kubenswrapper[4869]: E0929 14:04:06.515720 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Sep 29 14:04:06 crc kubenswrapper[4869]: E0929 14:04:06.517285 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Sep 29 14:04:06 crc kubenswrapper[4869]: E0929 14:04:06.518929 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Sep 29 14:04:06 crc kubenswrapper[4869]: E0929 14:04:06.518993 4869 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a0abade9-67cb-4289-94c3-78acda74b360" containerName="nova-scheduler-scheduler"
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.778255 4869 generic.go:334] "Generic (PLEG): container finished" podID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerID="7cf09f77c3676376915d5fd1e54227c0c7a8297640d9eafe683bf1f26e1f8cb4" exitCode=143
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.779458 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerDied","Data":"7cf09f77c3676376915d5fd1e54227c0c7a8297640d9eafe683bf1f26e1f8cb4"}
Sep 29 14:04:06 crc kubenswrapper[4869]: I0929 14:04:06.790676 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Sep 29 14:04:07 crc kubenswrapper[4869]: I0929 14:04:07.786006 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-log" containerID="cri-o://e3906a0f79e0170154173258bec818e8d1d2eeaf724cf78c7cfeaec8afcd4346" gracePeriod=30
Sep 29 14:04:07 crc kubenswrapper[4869]: I0929 14:04:07.786893 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-metadata" containerID="cri-o://f9aba5df0ce5305c558e1f5ef72fce5e13ea3f008f18888527eea5c4b2fa2c31" gracePeriod=30
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.795310 4869 generic.go:334] "Generic (PLEG): container finished" podID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerID="f9aba5df0ce5305c558e1f5ef72fce5e13ea3f008f18888527eea5c4b2fa2c31" exitCode=0
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.795650 4869 generic.go:334] "Generic (PLEG): container finished" podID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerID="e3906a0f79e0170154173258bec818e8d1d2eeaf724cf78c7cfeaec8afcd4346" exitCode=143
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.795390 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerDied","Data":"f9aba5df0ce5305c558e1f5ef72fce5e13ea3f008f18888527eea5c4b2fa2c31"}
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.795692 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerDied","Data":"e3906a0f79e0170154173258bec818e8d1d2eeaf724cf78c7cfeaec8afcd4346"}
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.797860 4869 generic.go:334] "Generic (PLEG): container finished" podID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerID="cf0642da47c5e2e1dd99bb8506e41f5e6c985150e2038a1fe585d4dc487d4f22" exitCode=0
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.797890 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerDied","Data":"cf0642da47c5e2e1dd99bb8506e41f5e6c985150e2038a1fe585d4dc487d4f22"}
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.797914 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1e21cae-762f-4dd0-812f-a82f49c208b1","Type":"ContainerDied","Data":"e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23"}
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.797926 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8eecf1b52c9cb341e81b050e6b772c0b426308fb24b9e37134f1f630cfe6c23"
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.841837 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.970624 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.970734 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.970896 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.970973 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.970999 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9xrb\" (UniqueName: \"kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.971025 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs\") pod \"f1e21cae-762f-4dd0-812f-a82f49c208b1\" (UID: \"f1e21cae-762f-4dd0-812f-a82f49c208b1\") "
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.971750 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs" (OuterVolumeSpecName: "logs") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:04:08 crc kubenswrapper[4869]: I0929 14:04:08.976997 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb" (OuterVolumeSpecName: "kube-api-access-n9xrb") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "kube-api-access-n9xrb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.000156 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data" (OuterVolumeSpecName: "config-data") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.002574 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.024602 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.026158 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f1e21cae-762f-4dd0-812f-a82f49c208b1" (UID: "f1e21cae-762f-4dd0-812f-a82f49c208b1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073266 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e21cae-762f-4dd0-812f-a82f49c208b1-logs\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073333 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-config-data\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073346 4869 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-public-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073359 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9xrb\" (UniqueName: \"kubernetes.io/projected/f1e21cae-762f-4dd0-812f-a82f49c208b1-kube-api-access-n9xrb\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073369 4869 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.073377 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e21cae-762f-4dd0-812f-a82f49c208b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.643534 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.784597 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h2cc\" (UniqueName: \"kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc\") pod \"ff1a02ba-2862-4062-8cb8-93c2904b7278\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.784735 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs\") pod \"ff1a02ba-2862-4062-8cb8-93c2904b7278\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.784763 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs\") pod \"ff1a02ba-2862-4062-8cb8-93c2904b7278\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.784785 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data\") pod \"ff1a02ba-2862-4062-8cb8-93c2904b7278\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.785466 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs" (OuterVolumeSpecName: "logs") pod "ff1a02ba-2862-4062-8cb8-93c2904b7278" (UID: "ff1a02ba-2862-4062-8cb8-93c2904b7278"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.785534 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle\") pod \"ff1a02ba-2862-4062-8cb8-93c2904b7278\" (UID: \"ff1a02ba-2862-4062-8cb8-93c2904b7278\") " Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.786001 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1a02ba-2862-4062-8cb8-93c2904b7278-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.790930 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc" (OuterVolumeSpecName: "kube-api-access-7h2cc") pod "ff1a02ba-2862-4062-8cb8-93c2904b7278" (UID: "ff1a02ba-2862-4062-8cb8-93c2904b7278"). InnerVolumeSpecName "kube-api-access-7h2cc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.810224 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.810224 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.812170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ff1a02ba-2862-4062-8cb8-93c2904b7278","Type":"ContainerDied","Data":"4aeabb3a5b867ac473f2dc7baa301e6ee910aeb3881ac1855917a956b48dc557"} Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.812326 4869 scope.go:117] "RemoveContainer" containerID="f9aba5df0ce5305c558e1f5ef72fce5e13ea3f008f18888527eea5c4b2fa2c31" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.820265 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff1a02ba-2862-4062-8cb8-93c2904b7278" (UID: "ff1a02ba-2862-4062-8cb8-93c2904b7278"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.832166 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data" (OuterVolumeSpecName: "config-data") pod "ff1a02ba-2862-4062-8cb8-93c2904b7278" (UID: "ff1a02ba-2862-4062-8cb8-93c2904b7278"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.847429 4869 scope.go:117] "RemoveContainer" containerID="e3906a0f79e0170154173258bec818e8d1d2eeaf724cf78c7cfeaec8afcd4346" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.852225 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.875504 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ff1a02ba-2862-4062-8cb8-93c2904b7278" (UID: "ff1a02ba-2862-4062-8cb8-93c2904b7278"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.876952 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.888139 4869 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.888189 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.888203 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1a02ba-2862-4062-8cb8-93c2904b7278-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.888214 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h2cc\" (UniqueName: \"kubernetes.io/projected/ff1a02ba-2862-4062-8cb8-93c2904b7278-kube-api-access-7h2cc\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.890920 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891397 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" containerName="nova-manage" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891417 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" containerName="nova-manage" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891436 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-metadata" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891443 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-metadata" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891454 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-log" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891461 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-log" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891473 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-log" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891478 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-log" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891498 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-api" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891504 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-api" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891516 4869 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="dnsmasq-dns" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891523 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="dnsmasq-dns" Sep 29 14:04:09 crc kubenswrapper[4869]: E0929 14:04:09.891533 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="init" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891539 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="init" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891734 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c5be2a8-3d7f-4360-85dc-dc3535017311" containerName="dnsmasq-dns" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891746 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-log" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891759 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-api" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891774 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" containerName="nova-manage" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891781 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" containerName="nova-metadata-metadata" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.891795 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" containerName="nova-api-log" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.898511 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.902465 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.905261 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.905420 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.905452 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990294 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-internal-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990428 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71dcb465-3ed7-4c66-a3df-0ab996e7c726-logs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990462 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990523 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-public-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990604 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-config-data\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:09 crc kubenswrapper[4869]: I0929 14:04:09.990685 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfp7b\" (UniqueName: \"kubernetes.io/projected/71dcb465-3ed7-4c66-a3df-0ab996e7c726-kube-api-access-dfp7b\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.092871 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71dcb465-3ed7-4c66-a3df-0ab996e7c726-logs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.092921 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.092962 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-public-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.093025 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-config-data\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.093051 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfp7b\" (UniqueName: \"kubernetes.io/projected/71dcb465-3ed7-4c66-a3df-0ab996e7c726-kube-api-access-dfp7b\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.093090 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-internal-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.094744 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71dcb465-3ed7-4c66-a3df-0ab996e7c726-logs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.097759 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-public-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.097763 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.098188 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-internal-tls-certs\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.110554 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71dcb465-3ed7-4c66-a3df-0ab996e7c726-config-data\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.110941 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfp7b\" (UniqueName: \"kubernetes.io/projected/71dcb465-3ed7-4c66-a3df-0ab996e7c726-kube-api-access-dfp7b\") pod \"nova-api-0\" (UID: \"71dcb465-3ed7-4c66-a3df-0ab996e7c726\") " 
pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.234814 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.234940 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.254326 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e21cae-762f-4dd0-812f-a82f49c208b1" path="/var/lib/kubelet/pods/f1e21cae-762f-4dd0-812f-a82f49c208b1/volumes" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.254958 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.255005 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.259048 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.259793 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.269533 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.270244 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.398522 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-config-data\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.398901 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c518901-c1c9-4b38-9f36-c65428e1937b-logs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.398928 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.398964 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tthg\" (UniqueName: \"kubernetes.io/projected/0c518901-c1c9-4b38-9f36-c65428e1937b-kube-api-access-2tthg\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.399074 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.504407 4869 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.504535 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-config-data\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.504659 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c518901-c1c9-4b38-9f36-c65428e1937b-logs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.504696 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.504725 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tthg\" (UniqueName: \"kubernetes.io/projected/0c518901-c1c9-4b38-9f36-c65428e1937b-kube-api-access-2tthg\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.505176 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c518901-c1c9-4b38-9f36-c65428e1937b-logs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.510131 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.510144 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.524204 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c518901-c1c9-4b38-9f36-c65428e1937b-config-data\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.527531 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tthg\" (UniqueName: \"kubernetes.io/projected/0c518901-c1c9-4b38-9f36-c65428e1937b-kube-api-access-2tthg\") pod \"nova-metadata-0\" (UID: \"0c518901-c1c9-4b38-9f36-c65428e1937b\") " pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc 
kubenswrapper[4869]: I0929 14:04:10.672319 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.735544 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Sep 29 14:04:10 crc kubenswrapper[4869]: W0929 14:04:10.744987 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71dcb465_3ed7_4c66_a3df_0ab996e7c726.slice/crio-8ad5a16b0cdfbf58300f0cc7623a3d98efb8b98fb1c9194a0d932639a7d22386 WatchSource:0}: Error finding container 8ad5a16b0cdfbf58300f0cc7623a3d98efb8b98fb1c9194a0d932639a7d22386: Status 404 returned error can't find the container with id 8ad5a16b0cdfbf58300f0cc7623a3d98efb8b98fb1c9194a0d932639a7d22386 Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.805513 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.832442 4869 generic.go:334] "Generic (PLEG): container finished" podID="a0abade9-67cb-4289-94c3-78acda74b360" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" exitCode=0 Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.832505 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a0abade9-67cb-4289-94c3-78acda74b360","Type":"ContainerDied","Data":"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc"} Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.832534 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a0abade9-67cb-4289-94c3-78acda74b360","Type":"ContainerDied","Data":"1f3f1795a7c55e069f672d9621c8835a5ca93c2ac7368fdc4bb0f789e045b46c"} Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.832551 4869 scope.go:117] "RemoveContainer" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.832642 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.835024 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"71dcb465-3ed7-4c66-a3df-0ab996e7c726","Type":"ContainerStarted","Data":"8ad5a16b0cdfbf58300f0cc7623a3d98efb8b98fb1c9194a0d932639a7d22386"} Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.872978 4869 scope.go:117] "RemoveContainer" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" Sep 29 14:04:10 crc kubenswrapper[4869]: E0929 14:04:10.873475 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc\": container with ID starting with 431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc not found: ID does not exist" containerID="431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.873520 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc"} err="failed to get container status \"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc\": rpc error: code = NotFound desc = could not find container \"431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc\": container with ID starting with 431654952be92a4572aa8b081733e108502c71a8f662d9d1ad783466aac7f1dc not found: ID does not exist" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.911372 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data\") pod \"a0abade9-67cb-4289-94c3-78acda74b360\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.911420 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle\") pod \"a0abade9-67cb-4289-94c3-78acda74b360\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.911545 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knpf5\" (UniqueName: \"kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5\") pod \"a0abade9-67cb-4289-94c3-78acda74b360\" (UID: \"a0abade9-67cb-4289-94c3-78acda74b360\") " Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.924266 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5" (OuterVolumeSpecName: "kube-api-access-knpf5") pod "a0abade9-67cb-4289-94c3-78acda74b360" (UID: "a0abade9-67cb-4289-94c3-78acda74b360"). InnerVolumeSpecName "kube-api-access-knpf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.949883 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0abade9-67cb-4289-94c3-78acda74b360" (UID: "a0abade9-67cb-4289-94c3-78acda74b360"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:10 crc kubenswrapper[4869]: I0929 14:04:10.957335 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data" (OuterVolumeSpecName: "config-data") pod "a0abade9-67cb-4289-94c3-78acda74b360" (UID: "a0abade9-67cb-4289-94c3-78acda74b360"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.013467 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knpf5\" (UniqueName: \"kubernetes.io/projected/a0abade9-67cb-4289-94c3-78acda74b360-kube-api-access-knpf5\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.013506 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.013519 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0abade9-67cb-4289-94c3-78acda74b360-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.148232 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Sep 29 14:04:11 crc kubenswrapper[4869]: W0929 14:04:11.155920 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c518901_c1c9_4b38_9f36_c65428e1937b.slice/crio-59a2eda022d9f121e70a77cc039755e4d6814c811d3a8a5947af9b79544bc0aa WatchSource:0}: Error finding container 59a2eda022d9f121e70a77cc039755e4d6814c811d3a8a5947af9b79544bc0aa: Status 404 returned error can't find the container with id 59a2eda022d9f121e70a77cc039755e4d6814c811d3a8a5947af9b79544bc0aa Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.322327 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.332478 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.339258 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:04:11 crc kubenswrapper[4869]: E0929 14:04:11.339731 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0abade9-67cb-4289-94c3-78acda74b360" containerName="nova-scheduler-scheduler" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.339750 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0abade9-67cb-4289-94c3-78acda74b360" containerName="nova-scheduler-scheduler" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.339976 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0abade9-67cb-4289-94c3-78acda74b360" containerName="nova-scheduler-scheduler" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.340892 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.343184 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.347692 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.433275 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.433553 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vkdh\" (UniqueName: \"kubernetes.io/projected/c4815a64-2248-4f94-b671-d62ef921dd68-kube-api-access-9vkdh\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.433592 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-config-data\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.558641 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vkdh\" (UniqueName: \"kubernetes.io/projected/c4815a64-2248-4f94-b671-d62ef921dd68-kube-api-access-9vkdh\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.558735 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-config-data\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.559005 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.564278 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.564538 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4815a64-2248-4f94-b671-d62ef921dd68-config-data\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.575084 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vkdh\" (UniqueName: 
\"kubernetes.io/projected/c4815a64-2248-4f94-b671-d62ef921dd68-kube-api-access-9vkdh\") pod \"nova-scheduler-0\" (UID: \"c4815a64-2248-4f94-b671-d62ef921dd68\") " pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.666444 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.849851 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c518901-c1c9-4b38-9f36-c65428e1937b","Type":"ContainerStarted","Data":"8da2f2b9b5a76c285ce0172d1ffc47a03dcdc862fdd46fe4622b444ee1d4efff"} Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.850249 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c518901-c1c9-4b38-9f36-c65428e1937b","Type":"ContainerStarted","Data":"4009f616a874e84613283e5d5ef8b2b8f3c08f971e6e21b10c943f44be8c0551"} Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.850276 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c518901-c1c9-4b38-9f36-c65428e1937b","Type":"ContainerStarted","Data":"59a2eda022d9f121e70a77cc039755e4d6814c811d3a8a5947af9b79544bc0aa"} Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.858446 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"71dcb465-3ed7-4c66-a3df-0ab996e7c726","Type":"ContainerStarted","Data":"c9be9af08a2f062cad4f2891ee2d5696f5f4ceedc10690345cd1b16658105797"} Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.858502 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"71dcb465-3ed7-4c66-a3df-0ab996e7c726","Type":"ContainerStarted","Data":"562fd985ef76e24f11189b8aa557e2707db0db95a5f703b01027589ba8ffc7fc"} Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.872363 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.872341265 podStartE2EDuration="1.872341265s" podCreationTimestamp="2025-09-29 14:04:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:04:11.866313299 +0000 UTC m=+1378.306957629" watchObservedRunningTime="2025-09-29 14:04:11.872341265 +0000 UTC m=+1378.312985595" Sep 29 14:04:11 crc kubenswrapper[4869]: I0929 14:04:11.896021 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8960009859999998 podStartE2EDuration="2.896000986s" podCreationTimestamp="2025-09-29 14:04:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:04:11.883161424 +0000 UTC m=+1378.323805744" watchObservedRunningTime="2025-09-29 14:04:11.896000986 +0000 UTC m=+1378.336645306" Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.119204 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Sep 29 14:04:12 crc kubenswrapper[4869]: W0929 14:04:12.121137 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4815a64_2248_4f94_b671_d62ef921dd68.slice/crio-e995ff906309985aaa8315653eea6f4e8527807c88efdaffc5db344833f38096 WatchSource:0}: Error finding container e995ff906309985aaa8315653eea6f4e8527807c88efdaffc5db344833f38096: 
Status 404 returned error can't find the container with id e995ff906309985aaa8315653eea6f4e8527807c88efdaffc5db344833f38096 Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.251225 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0abade9-67cb-4289-94c3-78acda74b360" path="/var/lib/kubelet/pods/a0abade9-67cb-4289-94c3-78acda74b360/volumes" Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.251906 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff1a02ba-2862-4062-8cb8-93c2904b7278" path="/var/lib/kubelet/pods/ff1a02ba-2862-4062-8cb8-93c2904b7278/volumes" Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.872245 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4815a64-2248-4f94-b671-d62ef921dd68","Type":"ContainerStarted","Data":"55289d71db1e71c03488b13d88c636615e141f7f68eb559532c6379c4bb47251"} Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.872480 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4815a64-2248-4f94-b671-d62ef921dd68","Type":"ContainerStarted","Data":"e995ff906309985aaa8315653eea6f4e8527807c88efdaffc5db344833f38096"} Sep 29 14:04:12 crc kubenswrapper[4869]: I0929 14:04:12.896839 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.896820016 podStartE2EDuration="1.896820016s" podCreationTimestamp="2025-09-29 14:04:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:04:12.885324759 +0000 UTC m=+1379.325969079" watchObservedRunningTime="2025-09-29 14:04:12.896820016 +0000 UTC m=+1379.337464336" Sep 29 14:04:15 crc kubenswrapper[4869]: I0929 14:04:15.673166 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:04:15 crc kubenswrapper[4869]: I0929 14:04:15.673710 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Sep 29 14:04:16 crc kubenswrapper[4869]: I0929 14:04:16.667059 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Sep 29 14:04:20 crc kubenswrapper[4869]: I0929 14:04:20.235593 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Sep 29 14:04:20 crc kubenswrapper[4869]: I0929 14:04:20.235945 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Sep 29 14:04:20 crc kubenswrapper[4869]: I0929 14:04:20.672795 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Sep 29 14:04:20 crc kubenswrapper[4869]: I0929 14:04:20.672861 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.247762 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="71dcb465-3ed7-4c66-a3df-0ab996e7c726" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.209:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.247770 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="71dcb465-3ed7-4c66-a3df-0ab996e7c726" containerName="nova-api-api" 
probeResult="failure" output="Get \"https://10.217.0.209:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.666656 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.684907 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0c518901-c1c9-4b38-9f36-c65428e1937b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.684927 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0c518901-c1c9-4b38-9f36-c65428e1937b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.735408 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Sep 29 14:04:21 crc kubenswrapper[4869]: I0929 14:04:21.984361 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Sep 29 14:04:25 crc kubenswrapper[4869]: I0929 14:04:25.929757 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.259148 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.259903 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.260302 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.260373 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.270747 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.273700 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.810215 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.814125 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Sep 29 14:04:30 crc kubenswrapper[4869]: I0929 14:04:30.824626 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Sep 29 14:04:31 crc kubenswrapper[4869]: I0929 14:04:31.033917 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Sep 29 14:04:39 crc kubenswrapper[4869]: I0929 14:04:39.551555 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:40 crc kubenswrapper[4869]: I0929 14:04:40.390483 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:41 crc 
kubenswrapper[4869]: I0929 14:04:41.141289 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.143750 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.158177 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.314586 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.314658 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.314736 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfzzz\" (UniqueName: \"kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.416518 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.416583 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.416700 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfzzz\" (UniqueName: \"kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.417069 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.417165 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.445774 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfzzz\" (UniqueName: \"kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz\") pod \"community-operators-8vgxf\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:41 crc kubenswrapper[4869]: I0929 14:04:41.462489 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:42 crc kubenswrapper[4869]: I0929 14:04:42.468281 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:43 crc kubenswrapper[4869]: I0929 14:04:43.151597 4869 generic.go:334] "Generic (PLEG): container finished" podID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerID="c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27" exitCode=0 Sep 29 14:04:43 crc kubenswrapper[4869]: I0929 14:04:43.151703 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerDied","Data":"c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27"} Sep 29 14:04:43 crc kubenswrapper[4869]: I0929 14:04:43.152014 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerStarted","Data":"b91e7c8035d65c853dc7f5b8808ef3d8344d5e6fa739a5b5235ca2b1126919b3"} Sep 29 14:04:43 crc kubenswrapper[4869]: I0929 14:04:43.208940 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="rabbitmq" containerID="cri-o://11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6" gracePeriod=604797 Sep 29 14:04:43 crc kubenswrapper[4869]: I0929 14:04:43.771940 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="rabbitmq" containerID="cri-o://bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2" gracePeriod=604797 Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.163421 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerStarted","Data":"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a"} Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.879926 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.980749 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72xx4\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981013 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981109 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981180 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981234 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981287 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981326 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981379 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981421 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981465 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: 
\"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.981510 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins\") pod \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\" (UID: \"ffba5854-b48f-4fd4-ba4b-0f1a0601239d\") " Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.983392 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.983955 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.989038 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.989654 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.993598 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4" (OuterVolumeSpecName: "kube-api-access-72xx4") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "kube-api-access-72xx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:44 crc kubenswrapper[4869]: I0929 14:04:44.996100 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info" (OuterVolumeSpecName: "pod-info") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.003847 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.020203 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.036234 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data" (OuterVolumeSpecName: "config-data") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.084644 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72xx4\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-kube-api-access-72xx4\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.085187 4869 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.085334 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.087416 4869 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-plugins-conf\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.087567 4869 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-pod-info\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.088323 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.096840 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.097024 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.097125 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.086383 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf" 
(OuterVolumeSpecName: "server-conf") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.133912 4869 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.170746 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ffba5854-b48f-4fd4-ba4b-0f1a0601239d" (UID: "ffba5854-b48f-4fd4-ba4b-0f1a0601239d"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.179659 4869 generic.go:334] "Generic (PLEG): container finished" podID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerID="11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6" exitCode=0 Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.179731 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerDied","Data":"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6"} Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.179808 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ffba5854-b48f-4fd4-ba4b-0f1a0601239d","Type":"ContainerDied","Data":"e659e9616738ed223cc31bc285f2949d371c8d0a1f443cc010cf5c8d8ad9a394"} Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.179830 4869 scope.go:117] "RemoveContainer" containerID="11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.180051 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.187850 4869 generic.go:334] "Generic (PLEG): container finished" podID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerID="d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a" exitCode=0 Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.187892 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerDied","Data":"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a"} Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.198322 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.198352 4869 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffba5854-b48f-4fd4-ba4b-0f1a0601239d-server-conf\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.198362 4869 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.301269 4869 scope.go:117] "RemoveContainer" containerID="17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.302116 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.310535 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.336359 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:45 crc kubenswrapper[4869]: E0929 14:04:45.336774 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="setup-container" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.336790 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="setup-container" Sep 29 14:04:45 crc kubenswrapper[4869]: E0929 14:04:45.336804 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="rabbitmq" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.336810 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="rabbitmq" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.337015 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" containerName="rabbitmq" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.338011 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.339826 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.343097 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.343346 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.343513 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.343777 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.343909 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.344319 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xwt5x" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.351831 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.378491 4869 scope.go:117] "RemoveContainer" containerID="11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6" Sep 29 14:04:45 crc kubenswrapper[4869]: E0929 14:04:45.378908 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6\": container with ID starting with 11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6 not found: ID does not exist" containerID="11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.378935 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6"} err="failed to get container status \"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6\": rpc error: code = NotFound desc = could not find container \"11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6\": container with ID starting with 11bdb5e71b8f16c0e46e4f065f77d390e0d45ae4b1d6eafe2276ce0ca7fbfaf6 not found: ID does not exist" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.378955 4869 scope.go:117] "RemoveContainer" containerID="17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5" Sep 29 14:04:45 crc kubenswrapper[4869]: E0929 14:04:45.379204 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5\": container with ID starting with 17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5 not found: ID does not exist" containerID="17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.379250 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5"} err="failed to get container status 
\"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5\": rpc error: code = NotFound desc = could not find container \"17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5\": container with ID starting with 17f8151bad6e7a27c489b384bd95fbefea711f2c91ebb07ef79e8be2e7a196f5 not found: ID does not exist" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.503788 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504093 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504157 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504247 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504278 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-config-data\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504393 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504682 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3cce7bdd-3959-48a5-a69b-fdf27672879a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504712 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3cce7bdd-3959-48a5-a69b-fdf27672879a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504760 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.504836 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hjw2\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-kube-api-access-4hjw2\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.514865 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.605951 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606016 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606096 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606140 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606190 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606211 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606271 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpmrz\" (UniqueName: 
\"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606334 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606382 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606455 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606478 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins\") pod \"0d97e3c5-9850-428b-9d88-89307901912d\" (UID: \"0d97e3c5-9850-428b-9d88-89307901912d\") " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606754 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606799 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hjw2\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-kube-api-access-4hjw2\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606837 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606893 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.606976 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607022 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607044 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607113 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-config-data\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607141 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607173 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3cce7bdd-3959-48a5-a69b-fdf27672879a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607200 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/3cce7bdd-3959-48a5-a69b-fdf27672879a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607220 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607316 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.607682 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.608687 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.608855 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.609335 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.611702 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-config-data\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.611984 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.612420 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.614951 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3cce7bdd-3959-48a5-a69b-fdf27672879a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.616672 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.617810 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3cce7bdd-3959-48a5-a69b-fdf27672879a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.619964 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz" (OuterVolumeSpecName: "kube-api-access-vpmrz") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "kube-api-access-vpmrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.620764 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.621212 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.621222 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3cce7bdd-3959-48a5-a69b-fdf27672879a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.621985 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info" (OuterVolumeSpecName: "pod-info") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.622225 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.627155 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.630721 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hjw2\" (UniqueName: \"kubernetes.io/projected/3cce7bdd-3959-48a5-a69b-fdf27672879a-kube-api-access-4hjw2\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.644979 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data" (OuterVolumeSpecName: "config-data") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.666255 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"3cce7bdd-3959-48a5-a69b-fdf27672879a\") " pod="openstack/rabbitmq-server-0" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709459 4869 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0d97e3c5-9850-428b-9d88-89307901912d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709489 4869 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0d97e3c5-9850-428b-9d88-89307901912d-pod-info\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709499 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709508 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpmrz\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-kube-api-access-vpmrz\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709518 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709548 4869 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-plugins-conf\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709559 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.709580 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.710360 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf" (OuterVolumeSpecName: "server-conf") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.733292 4869 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.764641 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "0d97e3c5-9850-428b-9d88-89307901912d" (UID: "0d97e3c5-9850-428b-9d88-89307901912d"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.811923 4869 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.812089 4869 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0d97e3c5-9850-428b-9d88-89307901912d-server-conf\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.812330 4869 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0d97e3c5-9850-428b-9d88-89307901912d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:45 crc kubenswrapper[4869]: I0929 14:04:45.961752 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.204367 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerStarted","Data":"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d"} Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.207672 4869 generic.go:334] "Generic (PLEG): container finished" podID="0d97e3c5-9850-428b-9d88-89307901912d" containerID="bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2" exitCode=0 Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.207712 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerDied","Data":"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2"} Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.207731 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.207747 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0d97e3c5-9850-428b-9d88-89307901912d","Type":"ContainerDied","Data":"688b90684503e7dbee7fcb682ee322e2c7b7f809b367ea35f7eeedfcfffb0b7f"} Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.207768 4869 scope.go:117] "RemoveContainer" containerID="bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.242846 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8vgxf" podStartSLOduration=2.671465615 podStartE2EDuration="5.24282696s" podCreationTimestamp="2025-09-29 14:04:41 +0000 UTC" firstStartedPulling="2025-09-29 14:04:43.153140048 +0000 UTC m=+1409.593784358" lastFinishedPulling="2025-09-29 14:04:45.724501383 +0000 UTC m=+1412.165145703" observedRunningTime="2025-09-29 14:04:46.226311363 +0000 UTC m=+1412.666955703" watchObservedRunningTime="2025-09-29 14:04:46.24282696 +0000 UTC m=+1412.683471280" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.245630 4869 scope.go:117] "RemoveContainer" containerID="de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.275813 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffba5854-b48f-4fd4-ba4b-0f1a0601239d" path="/var/lib/kubelet/pods/ffba5854-b48f-4fd4-ba4b-0f1a0601239d/volumes" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.276657 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.291159 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.309088 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:46 crc kubenswrapper[4869]: E0929 14:04:46.310830 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="setup-container" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.310851 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="setup-container" Sep 
29 14:04:46 crc kubenswrapper[4869]: E0929 14:04:46.310877 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="rabbitmq" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.310884 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="rabbitmq" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.311072 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d97e3c5-9850-428b-9d88-89307901912d" containerName="rabbitmq" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.312196 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.314582 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.314825 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.314922 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.315045 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.315138 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-n952x" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.315236 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.319751 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.321259 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.353309 4869 scope.go:117] "RemoveContainer" containerID="bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2" Sep 29 14:04:46 crc kubenswrapper[4869]: E0929 14:04:46.358049 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2\": container with ID starting with bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2 not found: ID does not exist" containerID="bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.358089 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2"} err="failed to get container status \"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2\": rpc error: code = NotFound desc = could not find container \"bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2\": container with ID starting with bad0099e2d6e75d4759abdecd252ecd274679a19d1482f3f4145ee65edabe4d2 not found: ID does not exist" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.358113 4869 scope.go:117] "RemoveContainer" containerID="de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082" Sep 29 
14:04:46 crc kubenswrapper[4869]: E0929 14:04:46.358487 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082\": container with ID starting with de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082 not found: ID does not exist" containerID="de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.358510 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082"} err="failed to get container status \"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082\": rpc error: code = NotFound desc = could not find container \"de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082\": container with ID starting with de77cb3a26a6aeeb6dd831d6d08df42e8e6a875558c261ef0226a75a909aa082 not found: ID does not exist" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432465 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432511 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b99cc79-2a53-4c36-ba65-c45598593017-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432573 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b99cc79-2a53-4c36-ba65-c45598593017-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432596 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432806 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432847 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqjhl\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-kube-api-access-lqjhl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432872 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432893 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.432922 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.433001 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.455819 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.534408 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.534471 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b99cc79-2a53-4c36-ba65-c45598593017-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.534768 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b99cc79-2a53-4c36-ba65-c45598593017-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535545 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " 
pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535592 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535660 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqjhl\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-kube-api-access-lqjhl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535685 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535728 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535758 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535808 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.535868 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.536142 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.536288 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.536827 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.537050 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.537388 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.538337 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b99cc79-2a53-4c36-ba65-c45598593017-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.539644 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.539951 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b99cc79-2a53-4c36-ba65-c45598593017-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.540119 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b99cc79-2a53-4c36-ba65-c45598593017-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.540132 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.561285 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqjhl\" (UniqueName: \"kubernetes.io/projected/3b99cc79-2a53-4c36-ba65-c45598593017-kube-api-access-lqjhl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.595188 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"3b99cc79-2a53-4c36-ba65-c45598593017\") " pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:46 crc kubenswrapper[4869]: I0929 14:04:46.650307 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:04:47 crc kubenswrapper[4869]: I0929 14:04:47.120674 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Sep 29 14:04:47 crc kubenswrapper[4869]: I0929 14:04:47.220988 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b99cc79-2a53-4c36-ba65-c45598593017","Type":"ContainerStarted","Data":"a65a26dae322487d58dc2a3708bb26d534bec47a37d080ae1054f6fbbe29c8fa"} Sep 29 14:04:47 crc kubenswrapper[4869]: I0929 14:04:47.223996 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3cce7bdd-3959-48a5-a69b-fdf27672879a","Type":"ContainerStarted","Data":"0d4a8f902078cac72c06148af0f558ae18df9f6d45c086c43ec6bc60d28527ac"} Sep 29 14:04:47 crc kubenswrapper[4869]: I0929 14:04:47.224040 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3cce7bdd-3959-48a5-a69b-fdf27672879a","Type":"ContainerStarted","Data":"009159cbbbdfc48bc0e3d6aa294d408ee8b95802b2893095c2b71c3603282f2d"} Sep 29 14:04:48 crc kubenswrapper[4869]: I0929 14:04:48.233194 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b99cc79-2a53-4c36-ba65-c45598593017","Type":"ContainerStarted","Data":"b59778ac878f5f999e53ed220ad1b9874780df1d91b1ac1bc3c04c80a1633860"} Sep 29 14:04:48 crc kubenswrapper[4869]: I0929 14:04:48.253175 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d97e3c5-9850-428b-9d88-89307901912d" path="/var/lib/kubelet/pods/0d97e3c5-9850-428b-9d88-89307901912d/volumes" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.464102 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.464718 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.515455 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.755592 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.757804 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.767721 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.830494 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.830854 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.830898 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcjfh\" (UniqueName: \"kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.933053 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.933339 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.933448 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcjfh\" (UniqueName: \"kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.933526 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.934094 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:51 crc kubenswrapper[4869]: I0929 14:04:51.955136 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fcjfh\" (UniqueName: \"kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh\") pod \"redhat-operators-4258g\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:52 crc kubenswrapper[4869]: I0929 14:04:52.077935 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:04:52 crc kubenswrapper[4869]: I0929 14:04:52.327456 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:52 crc kubenswrapper[4869]: I0929 14:04:52.557487 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 29 14:04:53 crc kubenswrapper[4869]: I0929 14:04:53.286679 4869 generic.go:334] "Generic (PLEG): container finished" podID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerID="fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657" exitCode=0 Sep 29 14:04:53 crc kubenswrapper[4869]: I0929 14:04:53.286776 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerDied","Data":"fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657"} Sep 29 14:04:53 crc kubenswrapper[4869]: I0929 14:04:53.287765 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerStarted","Data":"1a320fe411723d8b7221068025bc0c91aa858bf43f3d791315d2ec0cfe98454b"} Sep 29 14:04:54 crc kubenswrapper[4869]: I0929 14:04:54.298084 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerStarted","Data":"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30"} Sep 29 14:04:54 crc kubenswrapper[4869]: I0929 14:04:54.745637 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:54 crc kubenswrapper[4869]: I0929 14:04:54.745882 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8vgxf" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="registry-server" containerID="cri-o://ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d" gracePeriod=2 Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.240931 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.297642 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfzzz\" (UniqueName: \"kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz\") pod \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.297742 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities\") pod \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.297834 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content\") pod \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\" (UID: \"6aed6b9d-a2ad-43c6-9609-d58a64f038c4\") " Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.299244 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities" (OuterVolumeSpecName: "utilities") pod "6aed6b9d-a2ad-43c6-9609-d58a64f038c4" (UID: "6aed6b9d-a2ad-43c6-9609-d58a64f038c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.306275 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz" (OuterVolumeSpecName: "kube-api-access-jfzzz") pod "6aed6b9d-a2ad-43c6-9609-d58a64f038c4" (UID: "6aed6b9d-a2ad-43c6-9609-d58a64f038c4"). InnerVolumeSpecName "kube-api-access-jfzzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.312227 4869 generic.go:334] "Generic (PLEG): container finished" podID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerID="ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d" exitCode=0 Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.312309 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerDied","Data":"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d"} Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.312344 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vgxf" event={"ID":"6aed6b9d-a2ad-43c6-9609-d58a64f038c4","Type":"ContainerDied","Data":"b91e7c8035d65c853dc7f5b8808ef3d8344d5e6fa739a5b5235ca2b1126919b3"} Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.312364 4869 scope.go:117] "RemoveContainer" containerID="ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.312520 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8vgxf" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.322763 4869 generic.go:334] "Generic (PLEG): container finished" podID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerID="8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30" exitCode=0 Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.322805 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerDied","Data":"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30"} Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.363966 4869 scope.go:117] "RemoveContainer" containerID="d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.385359 4869 scope.go:117] "RemoveContainer" containerID="c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.400052 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfzzz\" (UniqueName: \"kubernetes.io/projected/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-kube-api-access-jfzzz\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.400787 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.424271 4869 scope.go:117] "RemoveContainer" containerID="ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d" Sep 29 14:04:55 crc kubenswrapper[4869]: E0929 14:04:55.424779 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d\": container with ID starting with ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d not found: ID does not exist" containerID="ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.424819 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d"} err="failed to get container status \"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d\": rpc error: code = NotFound desc = could not find container \"ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d\": container with ID starting with ef533075bf06ef62c622578844cc1ca5ad4eb51cf42723768c3bb197f166c89d not found: ID does not exist" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.424846 4869 scope.go:117] "RemoveContainer" containerID="d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a" Sep 29 14:04:55 crc kubenswrapper[4869]: E0929 14:04:55.425260 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a\": container with ID starting with d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a not found: ID does not exist" containerID="d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.425292 4869 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a"} err="failed to get container status \"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a\": rpc error: code = NotFound desc = could not find container \"d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a\": container with ID starting with d9aaeaeb274703fc68a540321aaf6e2909a121255febee9ee2445b691baf693a not found: ID does not exist" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.425316 4869 scope.go:117] "RemoveContainer" containerID="c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27" Sep 29 14:04:55 crc kubenswrapper[4869]: E0929 14:04:55.425734 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27\": container with ID starting with c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27 not found: ID does not exist" containerID="c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.425773 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27"} err="failed to get container status \"c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27\": rpc error: code = NotFound desc = could not find container \"c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27\": container with ID starting with c71b82acf6a0765122fa1dcfe7a55a7338cac5a8800ee1a741ba5ce8e4631c27 not found: ID does not exist" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.904087 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6aed6b9d-a2ad-43c6-9609-d58a64f038c4" (UID: "6aed6b9d-a2ad-43c6-9609-d58a64f038c4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.909141 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aed6b9d-a2ad-43c6-9609-d58a64f038c4-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.946284 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:55 crc kubenswrapper[4869]: I0929 14:04:55.956009 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8vgxf"] Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.254191 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" path="/var/lib/kubelet/pods/6aed6b9d-a2ad-43c6-9609-d58a64f038c4/volumes" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.338858 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerStarted","Data":"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f"} Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.361717 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4258g" podStartSLOduration=2.701782392 podStartE2EDuration="5.361695374s" podCreationTimestamp="2025-09-29 14:04:51 +0000 UTC" firstStartedPulling="2025-09-29 14:04:53.288957391 +0000 UTC m=+1419.729601711" lastFinishedPulling="2025-09-29 14:04:55.948870373 +0000 UTC m=+1422.389514693" observedRunningTime="2025-09-29 14:04:56.357821004 +0000 UTC m=+1422.798465354" watchObservedRunningTime="2025-09-29 14:04:56.361695374 +0000 UTC m=+1422.802339694" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.922671 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:04:56 crc kubenswrapper[4869]: E0929 14:04:56.923096 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="extract-utilities" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.923115 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="extract-utilities" Sep 29 14:04:56 crc kubenswrapper[4869]: E0929 14:04:56.923145 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="registry-server" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.923155 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="registry-server" Sep 29 14:04:56 crc kubenswrapper[4869]: E0929 14:04:56.923169 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="extract-content" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.923176 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="extract-content" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.923368 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aed6b9d-a2ad-43c6-9609-d58a64f038c4" containerName="registry-server" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.924416 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.927209 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Sep 29 14:04:56 crc kubenswrapper[4869]: I0929 14:04:56.936983 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030589 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030725 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030744 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030768 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030804 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhfv2\" (UniqueName: \"kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.030888 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132053 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132148 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: 
\"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132176 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132216 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhfv2\" (UniqueName: \"kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132276 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132298 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.132968 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.133199 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.133313 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.133433 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.133830 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 
14:04:57.150837 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhfv2\" (UniqueName: \"kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2\") pod \"dnsmasq-dns-b7cfdbcc7-nwlvh\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.250919 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:57 crc kubenswrapper[4869]: I0929 14:04:57.690918 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:04:58 crc kubenswrapper[4869]: I0929 14:04:58.365565 4869 generic.go:334] "Generic (PLEG): container finished" podID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerID="165831ee73179c3489b9a81ede8bbaea1c4d7a69f066f38c7c62f1a25eb6c462" exitCode=0 Sep 29 14:04:58 crc kubenswrapper[4869]: I0929 14:04:58.366156 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" event={"ID":"eaaaa329-ff8e-4a77-92b5-67eaec395bae","Type":"ContainerDied","Data":"165831ee73179c3489b9a81ede8bbaea1c4d7a69f066f38c7c62f1a25eb6c462"} Sep 29 14:04:58 crc kubenswrapper[4869]: I0929 14:04:58.366188 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" event={"ID":"eaaaa329-ff8e-4a77-92b5-67eaec395bae","Type":"ContainerStarted","Data":"ae9c178ee03f4b5cbc64476aad129303ae5553e436b716a5582e1d46eb27e477"} Sep 29 14:04:59 crc kubenswrapper[4869]: I0929 14:04:59.380354 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" event={"ID":"eaaaa329-ff8e-4a77-92b5-67eaec395bae","Type":"ContainerStarted","Data":"5418a67fe86a9694690422fbb615934577a4e4571d534e1fdddd9ffe4787aa89"} Sep 29 14:04:59 crc kubenswrapper[4869]: I0929 14:04:59.382222 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:04:59 crc kubenswrapper[4869]: I0929 14:04:59.404934 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" podStartSLOduration=3.404917601 podStartE2EDuration="3.404917601s" podCreationTimestamp="2025-09-29 14:04:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:04:59.401929644 +0000 UTC m=+1425.842573964" watchObservedRunningTime="2025-09-29 14:04:59.404917601 +0000 UTC m=+1425.845561921" Sep 29 14:05:02 crc kubenswrapper[4869]: I0929 14:05:02.078902 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:02 crc kubenswrapper[4869]: I0929 14:05:02.079205 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:02 crc kubenswrapper[4869]: I0929 14:05:02.125697 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:02 crc kubenswrapper[4869]: I0929 14:05:02.453111 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:02 crc kubenswrapper[4869]: I0929 14:05:02.493585 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 
29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.422848 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4258g" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="registry-server" containerID="cri-o://cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f" gracePeriod=2 Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.893470 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.988071 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcjfh\" (UniqueName: \"kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh\") pod \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.988138 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content\") pod \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.988246 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities\") pod \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\" (UID: \"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9\") " Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.989488 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities" (OuterVolumeSpecName: "utilities") pod "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" (UID: "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:05:04 crc kubenswrapper[4869]: I0929 14:05:04.995388 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh" (OuterVolumeSpecName: "kube-api-access-fcjfh") pod "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" (UID: "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9"). InnerVolumeSpecName "kube-api-access-fcjfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.090044 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcjfh\" (UniqueName: \"kubernetes.io/projected/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-kube-api-access-fcjfh\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.090080 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.270754 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" (UID: "86a5ccf5-ea11-4774-963e-61e5d1c6bbe9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.292636 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.436015 4869 generic.go:334] "Generic (PLEG): container finished" podID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerID="cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f" exitCode=0 Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.436057 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerDied","Data":"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f"} Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.436087 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4258g" event={"ID":"86a5ccf5-ea11-4774-963e-61e5d1c6bbe9","Type":"ContainerDied","Data":"1a320fe411723d8b7221068025bc0c91aa858bf43f3d791315d2ec0cfe98454b"} Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.436106 4869 scope.go:117] "RemoveContainer" containerID="cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.436140 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4258g" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.458718 4869 scope.go:117] "RemoveContainer" containerID="8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.480229 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.485993 4869 scope.go:117] "RemoveContainer" containerID="fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.489306 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4258g"] Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.538558 4869 scope.go:117] "RemoveContainer" containerID="cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f" Sep 29 14:05:05 crc kubenswrapper[4869]: E0929 14:05:05.539085 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f\": container with ID starting with cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f not found: ID does not exist" containerID="cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.539225 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f"} err="failed to get container status \"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f\": rpc error: code = NotFound desc = could not find container \"cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f\": container with ID starting with cbff7099961829fa3008e5593da817fdc275dc7e9fd064251958c96d546dfe4f not found: ID does not exist" Sep 29 14:05:05 crc 
kubenswrapper[4869]: I0929 14:05:05.539321 4869 scope.go:117] "RemoveContainer" containerID="8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30" Sep 29 14:05:05 crc kubenswrapper[4869]: E0929 14:05:05.539889 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30\": container with ID starting with 8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30 not found: ID does not exist" containerID="8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.539937 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30"} err="failed to get container status \"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30\": rpc error: code = NotFound desc = could not find container \"8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30\": container with ID starting with 8b2fd88e1e9ca5f88751947f51d2eb9388bf1966c08de15a870de61abb0d6f30 not found: ID does not exist" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.539970 4869 scope.go:117] "RemoveContainer" containerID="fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657" Sep 29 14:05:05 crc kubenswrapper[4869]: E0929 14:05:05.540370 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657\": container with ID starting with fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657 not found: ID does not exist" containerID="fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657" Sep 29 14:05:05 crc kubenswrapper[4869]: I0929 14:05:05.540438 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657"} err="failed to get container status \"fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657\": rpc error: code = NotFound desc = could not find container \"fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657\": container with ID starting with fe30f909dbbf5fb590c7d8befdf59ee448d99e2a61a84251cc674816fec2e657 not found: ID does not exist" Sep 29 14:05:06 crc kubenswrapper[4869]: I0929 14:05:06.254128 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" path="/var/lib/kubelet/pods/86a5ccf5-ea11-4774-963e-61e5d1c6bbe9/volumes" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.252742 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.318272 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.318763 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="dnsmasq-dns" containerID="cri-o://491b0dfa6bb97ec839de67306bdf16afc8a5fa9ab609437e137d20f99f7961f9" gracePeriod=10 Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.494100 4869 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-5cdbf8bf5-l6hdp"] Sep 29 14:05:07 crc kubenswrapper[4869]: E0929 14:05:07.494487 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="extract-content" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.494498 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="extract-content" Sep 29 14:05:07 crc kubenswrapper[4869]: E0929 14:05:07.494513 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="extract-utilities" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.494519 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="extract-utilities" Sep 29 14:05:07 crc kubenswrapper[4869]: E0929 14:05:07.494536 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="registry-server" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.494543 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="registry-server" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.494727 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="86a5ccf5-ea11-4774-963e-61e5d1c6bbe9" containerName="registry-server" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.496522 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.510447 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cdbf8bf5-l6hdp"] Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.523984 4869 generic.go:334] "Generic (PLEG): container finished" podID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerID="491b0dfa6bb97ec839de67306bdf16afc8a5fa9ab609437e137d20f99f7961f9" exitCode=0 Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.524038 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" event={"ID":"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1","Type":"ContainerDied","Data":"491b0dfa6bb97ec839de67306bdf16afc8a5fa9ab609437e137d20f99f7961f9"} Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.532228 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-dns-svc\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.532504 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.532628 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-config\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " 
pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.532865 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp7cq\" (UniqueName: \"kubernetes.io/projected/9ab22b18-ab47-45aa-8967-fec232b92cbb-kube-api-access-rp7cq\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.533050 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-sb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.533079 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-nb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.634713 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-dns-svc\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.634850 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.634907 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-config\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.635011 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp7cq\" (UniqueName: \"kubernetes.io/projected/9ab22b18-ab47-45aa-8967-fec232b92cbb-kube-api-access-rp7cq\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.635079 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-sb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.635101 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-nb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " 
pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.635957 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.636285 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-sb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.636514 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-dns-svc\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.636541 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-config\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.637025 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ab22b18-ab47-45aa-8967-fec232b92cbb-ovsdbserver-nb\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.654052 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp7cq\" (UniqueName: \"kubernetes.io/projected/9ab22b18-ab47-45aa-8967-fec232b92cbb-kube-api-access-rp7cq\") pod \"dnsmasq-dns-5cdbf8bf5-l6hdp\" (UID: \"9ab22b18-ab47-45aa-8967-fec232b92cbb\") " pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.842079 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:07 crc kubenswrapper[4869]: I0929 14:05:07.939413 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.044387 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc\") pod \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.044502 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config\") pod \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.044761 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb\") pod \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.044845 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmkdh\" (UniqueName: \"kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh\") pod \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.044873 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb\") pod \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\" (UID: \"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1\") " Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.049129 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh" (OuterVolumeSpecName: "kube-api-access-dmkdh") pod "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" (UID: "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1"). InnerVolumeSpecName "kube-api-access-dmkdh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.103661 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config" (OuterVolumeSpecName: "config") pod "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" (UID: "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.107586 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" (UID: "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.110940 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" (UID: "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.126562 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" (UID: "1a63bcfc-67ae-4d97-9ca9-e833b76e30f1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.146864 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.146898 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmkdh\" (UniqueName: \"kubernetes.io/projected/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-kube-api-access-dmkdh\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.146911 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.146921 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.146931 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.314269 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cdbf8bf5-l6hdp"] Sep 29 14:05:08 crc kubenswrapper[4869]: W0929 14:05:08.326854 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ab22b18_ab47_45aa_8967_fec232b92cbb.slice/crio-e29df92351f78604f1287ab80836afde3f1646a63ff1039179e5a54ff0d19a8a WatchSource:0}: Error finding container e29df92351f78604f1287ab80836afde3f1646a63ff1039179e5a54ff0d19a8a: Status 404 returned error can't find the container with id e29df92351f78604f1287ab80836afde3f1646a63ff1039179e5a54ff0d19a8a Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.534488 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.534479 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-585f5c457c-pn6z2" event={"ID":"1a63bcfc-67ae-4d97-9ca9-e833b76e30f1","Type":"ContainerDied","Data":"5778eea00cd61c949971cb3655e441bf8d4a129f84b825a80a747c93a3f8ae5c"} Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.534888 4869 scope.go:117] "RemoveContainer" containerID="491b0dfa6bb97ec839de67306bdf16afc8a5fa9ab609437e137d20f99f7961f9" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.551318 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" event={"ID":"9ab22b18-ab47-45aa-8967-fec232b92cbb","Type":"ContainerStarted","Data":"e29df92351f78604f1287ab80836afde3f1646a63ff1039179e5a54ff0d19a8a"} Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.579588 4869 scope.go:117] "RemoveContainer" containerID="c9e51ccc006bcffa164301b1d8f074fae69a671c201a2308803e2d64cb2eb308" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.584088 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.602777 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-585f5c457c-pn6z2"] Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.623796 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h"] Sep 29 14:05:08 crc kubenswrapper[4869]: E0929 14:05:08.625192 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="dnsmasq-dns" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.625281 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="dnsmasq-dns" Sep 29 14:05:08 crc kubenswrapper[4869]: E0929 14:05:08.625479 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="init" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.625549 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="init" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.626138 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" containerName="dnsmasq-dns" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.645235 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.649152 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.649323 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.649161 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.649931 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.654735 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h"] Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.655117 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.655180 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.655267 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.655391 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4psdp\" (UniqueName: \"kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.757885 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4psdp\" (UniqueName: \"kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.758047 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.758090 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.758133 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.763966 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.764215 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.764181 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:08 crc kubenswrapper[4869]: I0929 14:05:08.773683 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4psdp\" (UniqueName: \"kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:09 crc kubenswrapper[4869]: I0929 14:05:09.066992 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:09 crc kubenswrapper[4869]: I0929 14:05:09.563855 4869 generic.go:334] "Generic (PLEG): container finished" podID="9ab22b18-ab47-45aa-8967-fec232b92cbb" containerID="fede3d6efd1f4d8d716142830b011fe76afec2ec6420e181e17c8709fe5d7f7a" exitCode=0 Sep 29 14:05:09 crc kubenswrapper[4869]: I0929 14:05:09.563956 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" event={"ID":"9ab22b18-ab47-45aa-8967-fec232b92cbb","Type":"ContainerDied","Data":"fede3d6efd1f4d8d716142830b011fe76afec2ec6420e181e17c8709fe5d7f7a"} Sep 29 14:05:09 crc kubenswrapper[4869]: I0929 14:05:09.605227 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h"] Sep 29 14:05:09 crc kubenswrapper[4869]: W0929 14:05:09.622657 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e18b32b_bc94_48bf_9042_6f3b03a56811.slice/crio-5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d WatchSource:0}: Error finding container 5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d: Status 404 returned error can't find the container with id 5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d Sep 29 14:05:10 crc kubenswrapper[4869]: I0929 14:05:10.253098 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a63bcfc-67ae-4d97-9ca9-e833b76e30f1" path="/var/lib/kubelet/pods/1a63bcfc-67ae-4d97-9ca9-e833b76e30f1/volumes" Sep 29 14:05:10 crc kubenswrapper[4869]: I0929 14:05:10.574367 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" event={"ID":"4e18b32b-bc94-48bf-9042-6f3b03a56811","Type":"ContainerStarted","Data":"5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d"} Sep 29 14:05:10 crc kubenswrapper[4869]: I0929 14:05:10.577457 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" event={"ID":"9ab22b18-ab47-45aa-8967-fec232b92cbb","Type":"ContainerStarted","Data":"7f0ced1408c6650c9ab264ac2e583ffb06e76fe4ef2bd3977be150b9236ac22a"} Sep 29 14:05:10 crc kubenswrapper[4869]: I0929 14:05:10.577676 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:10 crc kubenswrapper[4869]: I0929 14:05:10.602957 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" podStartSLOduration=3.602935457 podStartE2EDuration="3.602935457s" podCreationTimestamp="2025-09-29 14:05:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:05:10.594726365 +0000 UTC m=+1437.035370685" watchObservedRunningTime="2025-09-29 14:05:10.602935457 +0000 UTC m=+1437.043579777" Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.061304 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.641810 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" event={"ID":"4e18b32b-bc94-48bf-9042-6f3b03a56811","Type":"ContainerStarted","Data":"abea5017b835abc0a0ebcfcb98d8f1e648bbad419cac12a52c26bb1987b709fe"} Sep 
29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.643201 4869 generic.go:334] "Generic (PLEG): container finished" podID="3cce7bdd-3959-48a5-a69b-fdf27672879a" containerID="0d4a8f902078cac72c06148af0f558ae18df9f6d45c086c43ec6bc60d28527ac" exitCode=0 Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.643226 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3cce7bdd-3959-48a5-a69b-fdf27672879a","Type":"ContainerDied","Data":"0d4a8f902078cac72c06148af0f558ae18df9f6d45c086c43ec6bc60d28527ac"} Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.667453 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" podStartSLOduration=2.238774426 podStartE2EDuration="9.667433707s" podCreationTimestamp="2025-09-29 14:05:08 +0000 UTC" firstStartedPulling="2025-09-29 14:05:09.628507534 +0000 UTC m=+1436.069151854" lastFinishedPulling="2025-09-29 14:05:17.057166815 +0000 UTC m=+1443.497811135" observedRunningTime="2025-09-29 14:05:17.658930738 +0000 UTC m=+1444.099575058" watchObservedRunningTime="2025-09-29 14:05:17.667433707 +0000 UTC m=+1444.108078027" Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.843681 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cdbf8bf5-l6hdp" Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.898875 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:05:17 crc kubenswrapper[4869]: I0929 14:05:17.899100 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="dnsmasq-dns" containerID="cri-o://5418a67fe86a9694690422fbb615934577a4e4571d534e1fdddd9ffe4787aa89" gracePeriod=10 Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.654364 4869 generic.go:334] "Generic (PLEG): container finished" podID="3b99cc79-2a53-4c36-ba65-c45598593017" containerID="b59778ac878f5f999e53ed220ad1b9874780df1d91b1ac1bc3c04c80a1633860" exitCode=0 Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.654445 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b99cc79-2a53-4c36-ba65-c45598593017","Type":"ContainerDied","Data":"b59778ac878f5f999e53ed220ad1b9874780df1d91b1ac1bc3c04c80a1633860"} Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.665498 4869 generic.go:334] "Generic (PLEG): container finished" podID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerID="5418a67fe86a9694690422fbb615934577a4e4571d534e1fdddd9ffe4787aa89" exitCode=0 Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.665595 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" event={"ID":"eaaaa329-ff8e-4a77-92b5-67eaec395bae","Type":"ContainerDied","Data":"5418a67fe86a9694690422fbb615934577a4e4571d534e1fdddd9ffe4787aa89"} Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.671178 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3cce7bdd-3959-48a5-a69b-fdf27672879a","Type":"ContainerStarted","Data":"fffbd500d41a2fd2bd5859e62df65c9320c6dff25c178c41c8a00137f33303e0"} Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.671872 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 
14:05:18.712250 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=33.712233061 podStartE2EDuration="33.712233061s" podCreationTimestamp="2025-09-29 14:04:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:05:18.710491807 +0000 UTC m=+1445.151136127" watchObservedRunningTime="2025-09-29 14:05:18.712233061 +0000 UTC m=+1445.152877381" Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.828651 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.941923 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhfv2\" (UniqueName: \"kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.942013 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.942126 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.942149 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.942179 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.942257 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam\") pod \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\" (UID: \"eaaaa329-ff8e-4a77-92b5-67eaec395bae\") " Sep 29 14:05:18 crc kubenswrapper[4869]: I0929 14:05:18.949986 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2" (OuterVolumeSpecName: "kube-api-access-vhfv2") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "kube-api-access-vhfv2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.018066 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.018235 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config" (OuterVolumeSpecName: "config") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.020436 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.023062 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.026548 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "eaaaa329-ff8e-4a77-92b5-67eaec395bae" (UID: "eaaaa329-ff8e-4a77-92b5-67eaec395bae"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046591 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046639 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046650 4869 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-dns-svc\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046659 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046669 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhfv2\" (UniqueName: \"kubernetes.io/projected/eaaaa329-ff8e-4a77-92b5-67eaec395bae-kube-api-access-vhfv2\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.046678 4869 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaaaa329-ff8e-4a77-92b5-67eaec395bae-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.682297 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b99cc79-2a53-4c36-ba65-c45598593017","Type":"ContainerStarted","Data":"0271f30de4bb9841cd3717a888256381fe6e982103d4e1024ccaf0ac33898611"} Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.683935 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.687377 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.687450 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b7cfdbcc7-nwlvh" event={"ID":"eaaaa329-ff8e-4a77-92b5-67eaec395bae","Type":"ContainerDied","Data":"ae9c178ee03f4b5cbc64476aad129303ae5553e436b716a5582e1d46eb27e477"} Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.687512 4869 scope.go:117] "RemoveContainer" containerID="5418a67fe86a9694690422fbb615934577a4e4571d534e1fdddd9ffe4787aa89" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.727073 4869 scope.go:117] "RemoveContainer" containerID="165831ee73179c3489b9a81ede8bbaea1c4d7a69f066f38c7c62f1a25eb6c462" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.728033 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=33.728010019 podStartE2EDuration="33.728010019s" podCreationTimestamp="2025-09-29 14:04:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:05:19.707035289 +0000 UTC m=+1446.147679609" watchObservedRunningTime="2025-09-29 14:05:19.728010019 +0000 UTC m=+1446.168654339" Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.753996 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:05:19 crc kubenswrapper[4869]: I0929 14:05:19.765728 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b7cfdbcc7-nwlvh"] Sep 29 14:05:20 crc kubenswrapper[4869]: I0929 14:05:20.253272 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" path="/var/lib/kubelet/pods/eaaaa329-ff8e-4a77-92b5-67eaec395bae/volumes" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.598892 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qpffg"] Sep 29 14:05:27 crc kubenswrapper[4869]: E0929 14:05:27.599568 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="dnsmasq-dns" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.599581 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="dnsmasq-dns" Sep 29 14:05:27 crc kubenswrapper[4869]: E0929 14:05:27.599602 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="init" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.599627 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="init" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.599817 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaaaa329-ff8e-4a77-92b5-67eaec395bae" containerName="dnsmasq-dns" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.601238 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.605668 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.605762 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.605801 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwksw\" (UniqueName: \"kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.610111 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"] Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.707400 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwksw\" (UniqueName: \"kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.707666 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.707725 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.708349 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.708370 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.729645 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pwksw\" (UniqueName: \"kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw\") pod \"certified-operators-qpffg\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") " pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:27 crc kubenswrapper[4869]: I0929 14:05:27.927749 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:28 crc kubenswrapper[4869]: I0929 14:05:28.413037 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"] Sep 29 14:05:28 crc kubenswrapper[4869]: W0929 14:05:28.418566 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc732020f_26a7_4848_97ab_2b91cb919c1c.slice/crio-2fc86c4ce9c296cc640948a19c070adc7617ac012a83ecb19214125b84608361 WatchSource:0}: Error finding container 2fc86c4ce9c296cc640948a19c070adc7617ac012a83ecb19214125b84608361: Status 404 returned error can't find the container with id 2fc86c4ce9c296cc640948a19c070adc7617ac012a83ecb19214125b84608361 Sep 29 14:05:28 crc kubenswrapper[4869]: I0929 14:05:28.763535 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerStarted","Data":"2fc86c4ce9c296cc640948a19c070adc7617ac012a83ecb19214125b84608361"} Sep 29 14:05:29 crc kubenswrapper[4869]: E0929 14:05:29.075966 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc732020f_26a7_4848_97ab_2b91cb919c1c.slice/crio-429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:05:29 crc kubenswrapper[4869]: I0929 14:05:29.778351 4869 generic.go:334] "Generic (PLEG): container finished" podID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerID="429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db" exitCode=0 Sep 29 14:05:29 crc kubenswrapper[4869]: I0929 14:05:29.778466 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerDied","Data":"429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db"} Sep 29 14:05:35 crc kubenswrapper[4869]: I0929 14:05:35.964134 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="3cce7bdd-3959-48a5-a69b-fdf27672879a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.213:5671: connect: connection refused" Sep 29 14:05:36 crc kubenswrapper[4869]: I0929 14:05:36.654037 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="3b99cc79-2a53-4c36-ba65-c45598593017" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.214:5671: connect: connection refused" Sep 29 14:05:36 crc kubenswrapper[4869]: I0929 14:05:36.844934 4869 generic.go:334] "Generic (PLEG): container finished" podID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerID="0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a" exitCode=0 Sep 29 14:05:36 crc kubenswrapper[4869]: I0929 14:05:36.844980 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerDied","Data":"0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a"} Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.856366 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerStarted","Data":"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"} Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.858036 4869 generic.go:334] "Generic (PLEG): container finished" podID="4e18b32b-bc94-48bf-9042-6f3b03a56811" containerID="abea5017b835abc0a0ebcfcb98d8f1e648bbad419cac12a52c26bb1987b709fe" exitCode=0 Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.858086 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" event={"ID":"4e18b32b-bc94-48bf-9042-6f3b03a56811","Type":"ContainerDied","Data":"abea5017b835abc0a0ebcfcb98d8f1e648bbad419cac12a52c26bb1987b709fe"} Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.877096 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qpffg" podStartSLOduration=3.165925593 podStartE2EDuration="10.877072522s" podCreationTimestamp="2025-09-29 14:05:27 +0000 UTC" firstStartedPulling="2025-09-29 14:05:29.780035092 +0000 UTC m=+1456.220679412" lastFinishedPulling="2025-09-29 14:05:37.491182021 +0000 UTC m=+1463.931826341" observedRunningTime="2025-09-29 14:05:37.875514132 +0000 UTC m=+1464.316158462" watchObservedRunningTime="2025-09-29 14:05:37.877072522 +0000 UTC m=+1464.317716842" Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.928329 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:37 crc kubenswrapper[4869]: I0929 14:05:37.928403 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qpffg" Sep 29 14:05:38 crc kubenswrapper[4869]: I0929 14:05:38.973983 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-qpffg" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server" probeResult="failure" output=< Sep 29 14:05:38 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 14:05:38 crc kubenswrapper[4869]: > Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.299925 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.428350 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4psdp\" (UniqueName: \"kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp\") pod \"4e18b32b-bc94-48bf-9042-6f3b03a56811\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.428794 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory\") pod \"4e18b32b-bc94-48bf-9042-6f3b03a56811\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.428871 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key\") pod \"4e18b32b-bc94-48bf-9042-6f3b03a56811\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.429072 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle\") pod \"4e18b32b-bc94-48bf-9042-6f3b03a56811\" (UID: \"4e18b32b-bc94-48bf-9042-6f3b03a56811\") " Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.434700 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4e18b32b-bc94-48bf-9042-6f3b03a56811" (UID: "4e18b32b-bc94-48bf-9042-6f3b03a56811"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.434961 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp" (OuterVolumeSpecName: "kube-api-access-4psdp") pod "4e18b32b-bc94-48bf-9042-6f3b03a56811" (UID: "4e18b32b-bc94-48bf-9042-6f3b03a56811"). InnerVolumeSpecName "kube-api-access-4psdp". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.459131 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory" (OuterVolumeSpecName: "inventory") pod "4e18b32b-bc94-48bf-9042-6f3b03a56811" (UID: "4e18b32b-bc94-48bf-9042-6f3b03a56811"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.459454 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4e18b32b-bc94-48bf-9042-6f3b03a56811" (UID: "4e18b32b-bc94-48bf-9042-6f3b03a56811"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.531319 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.531528 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.531596 4869 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e18b32b-bc94-48bf-9042-6f3b03a56811-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.531706 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4psdp\" (UniqueName: \"kubernetes.io/projected/4e18b32b-bc94-48bf-9042-6f3b03a56811-kube-api-access-4psdp\") on node \"crc\" DevicePath \"\"" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.878082 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" event={"ID":"4e18b32b-bc94-48bf-9042-6f3b03a56811","Type":"ContainerDied","Data":"5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d"} Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.878151 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5dfe8bf6d2aea9d16b12a7bc9a60d7b35691baaf0a12aee52424b60c58b4771d" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.878098 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.973197 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"] Sep 29 14:05:39 crc kubenswrapper[4869]: E0929 14:05:39.974086 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e18b32b-bc94-48bf-9042-6f3b03a56811" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.974113 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e18b32b-bc94-48bf-9042-6f3b03a56811" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.974370 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e18b32b-bc94-48bf-9042-6f3b03a56811" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.975231 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.978060 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.978325 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.978541 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.978708 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:05:39 crc kubenswrapper[4869]: I0929 14:05:39.985175 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"] Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.144045 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.144092 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cnsr\" (UniqueName: \"kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.144471 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.144664 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.246113 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.246173 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cnsr\" (UniqueName: \"kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.246298 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.246352 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.252918 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.252919 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.253032 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.276420 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cnsr\" (UniqueName: \"kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.294089 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"
Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.824061 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"]
Sep 29 14:05:40 crc kubenswrapper[4869]: I0929 14:05:40.888777 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" event={"ID":"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9","Type":"ContainerStarted","Data":"bfb518bf58d12aa109b33c522c37266215e9c206ea1ecd849ffa1e51430cd289"}
Sep 29 14:05:41 crc kubenswrapper[4869]: I0929 14:05:41.911147 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" event={"ID":"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9","Type":"ContainerStarted","Data":"36b7d9d5a9700938d1563870424ec500d98cf311151a5d884b3e2c521ce828c4"}
Sep 29 14:05:41 crc kubenswrapper[4869]: I0929 14:05:41.929944 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" podStartSLOduration=2.469459277 podStartE2EDuration="2.929925229s" podCreationTimestamp="2025-09-29 14:05:39 +0000 UTC" firstStartedPulling="2025-09-29 14:05:40.815172531 +0000 UTC m=+1467.255816851" lastFinishedPulling="2025-09-29 14:05:41.275638483 +0000 UTC m=+1467.716282803" observedRunningTime="2025-09-29 14:05:41.923124523 +0000 UTC m=+1468.363768863" watchObservedRunningTime="2025-09-29 14:05:41.929925229 +0000 UTC m=+1468.370569549"
Sep 29 14:05:45 crc kubenswrapper[4869]: I0929 14:05:45.963868 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Sep 29 14:05:46 crc kubenswrapper[4869]: I0929 14:05:46.652438 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Sep 29 14:05:47 crc kubenswrapper[4869]: I0929 14:05:47.976364 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qpffg"
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.023106 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qpffg"
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.132673 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"]
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.175055 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fffjd"]
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.175371 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fffjd" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="registry-server" containerID="cri-o://12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71" gracePeriod=2
Sep 29 14:05:48 crc kubenswrapper[4869]: E0929 14:05:48.260928 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 is running failed: container process not found" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:05:48 crc kubenswrapper[4869]: E0929 14:05:48.261251 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 is running failed: container process not found" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:05:48 crc kubenswrapper[4869]: E0929 14:05:48.261816 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 is running failed: container process not found" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:05:48 crc kubenswrapper[4869]: E0929 14:05:48.261843 4869 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-fffjd" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="registry-server"
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.701560 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fffjd"
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.802610 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content\") pod \"5db434b8-32b9-4401-aac4-2865a87bfdb1\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") "
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.802789 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities\") pod \"5db434b8-32b9-4401-aac4-2865a87bfdb1\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") "
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.802836 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwnp6\" (UniqueName: \"kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6\") pod \"5db434b8-32b9-4401-aac4-2865a87bfdb1\" (UID: \"5db434b8-32b9-4401-aac4-2865a87bfdb1\") "
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.803227 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities" (OuterVolumeSpecName: "utilities") pod "5db434b8-32b9-4401-aac4-2865a87bfdb1" (UID: "5db434b8-32b9-4401-aac4-2865a87bfdb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.803948 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.811687 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6" (OuterVolumeSpecName: "kube-api-access-jwnp6") pod "5db434b8-32b9-4401-aac4-2865a87bfdb1" (UID: "5db434b8-32b9-4401-aac4-2865a87bfdb1"). InnerVolumeSpecName "kube-api-access-jwnp6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.847094 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5db434b8-32b9-4401-aac4-2865a87bfdb1" (UID: "5db434b8-32b9-4401-aac4-2865a87bfdb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.905805 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwnp6\" (UniqueName: \"kubernetes.io/projected/5db434b8-32b9-4401-aac4-2865a87bfdb1-kube-api-access-jwnp6\") on node \"crc\" DevicePath \"\""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.905876 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5db434b8-32b9-4401-aac4-2865a87bfdb1-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.978895 4869 generic.go:334] "Generic (PLEG): container finished" podID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71" exitCode=0
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.978974 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerDied","Data":"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"}
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.979018 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fffjd" event={"ID":"5db434b8-32b9-4401-aac4-2865a87bfdb1","Type":"ContainerDied","Data":"63d44e317d50e6b7f528e7a9db98e74fa9befed38a22832b07481be1095f6e94"}
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.979046 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fffjd"
Sep 29 14:05:48 crc kubenswrapper[4869]: I0929 14:05:48.979046 4869 scope.go:117] "RemoveContainer" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.002963 4869 scope.go:117] "RemoveContainer" containerID="65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.021008 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fffjd"]
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.036227 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fffjd"]
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.040606 4869 scope.go:117] "RemoveContainer" containerID="d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.071315 4869 scope.go:117] "RemoveContainer" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"
Sep 29 14:05:49 crc kubenswrapper[4869]: E0929 14:05:49.072119 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71\": container with ID starting with 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 not found: ID does not exist" containerID="12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.072152 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71"} err="failed to get container status \"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71\": rpc error: code = NotFound desc = could not find container \"12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71\": container with ID starting with 12790e7028725f4b98ed1fb7748a173eb9ad11df45cda42a2cdeddcbc9508b71 not found: ID does not exist"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.072177 4869 scope.go:117] "RemoveContainer" containerID="65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"
Sep 29 14:05:49 crc kubenswrapper[4869]: E0929 14:05:49.072912 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1\": container with ID starting with 65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1 not found: ID does not exist" containerID="65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.072945 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1"} err="failed to get container status \"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1\": rpc error: code = NotFound desc = could not find container \"65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1\": container with ID starting with 65caec9e93b41ea6214fafb91275ae7f45d6b6370269dedaa4f198c0b5d759d1 not found: ID does not exist"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.072962 4869 scope.go:117] "RemoveContainer" containerID="d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79"
Sep 29 14:05:49 crc kubenswrapper[4869]: E0929 14:05:49.074998 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79\": container with ID starting with d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79 not found: ID does not exist" containerID="d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79"
Sep 29 14:05:49 crc kubenswrapper[4869]: I0929 14:05:49.075070 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79"} err="failed to get container status \"d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79\": rpc error: code = NotFound desc = could not find container \"d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79\": container with ID starting with d4e3e08c90cc8f61a6d08d29cf020f44a036437716d161b55b764a11896a4e79 not found: ID does not exist"
Sep 29 14:05:50 crc kubenswrapper[4869]: I0929 14:05:50.252756 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" path="/var/lib/kubelet/pods/5db434b8-32b9-4401-aac4-2865a87bfdb1/volumes"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.111851 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:17 crc kubenswrapper[4869]: E0929 14:06:17.112879 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="extract-utilities"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.112898 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="extract-utilities"
Sep 29 14:06:17 crc kubenswrapper[4869]: E0929 14:06:17.112911 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="extract-content"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.112919 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="extract-content"
Sep 29 14:06:17 crc kubenswrapper[4869]: E0929 14:06:17.112955 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="registry-server"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.112963 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="registry-server"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.113189 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5db434b8-32b9-4401-aac4-2865a87bfdb1" containerName="registry-server"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.115060 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.122294 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.235417 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vgs7\" (UniqueName: \"kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.235597 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.235709 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.338181 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.338250 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.338349 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vgs7\" (UniqueName: \"kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.338776 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.338845 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.361793 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vgs7\" (UniqueName: \"kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7\") pod \"redhat-marketplace-8x8fn\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") " pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.445380 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:17 crc kubenswrapper[4869]: I0929 14:06:17.916461 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:17 crc kubenswrapper[4869]: W0929 14:06:17.918330 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66d47fe6_cf7a_4505_ae63_8790e33de8f4.slice/crio-2720bb677f1787cb08b4d52628fcb69e7303846095159f389b38e4b5c2869c87 WatchSource:0}: Error finding container 2720bb677f1787cb08b4d52628fcb69e7303846095159f389b38e4b5c2869c87: Status 404 returned error can't find the container with id 2720bb677f1787cb08b4d52628fcb69e7303846095159f389b38e4b5c2869c87
Sep 29 14:06:18 crc kubenswrapper[4869]: I0929 14:06:18.265485 4869 generic.go:334] "Generic (PLEG): container finished" podID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerID="5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95" exitCode=0
Sep 29 14:06:18 crc kubenswrapper[4869]: I0929 14:06:18.275295 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerDied","Data":"5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95"}
Sep 29 14:06:18 crc kubenswrapper[4869]: I0929 14:06:18.275342 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerStarted","Data":"2720bb677f1787cb08b4d52628fcb69e7303846095159f389b38e4b5c2869c87"}
Sep 29 14:06:19 crc kubenswrapper[4869]: I0929 14:06:19.897731 4869 scope.go:117] "RemoveContainer" containerID="e8b18c233c2243fde14ba079808129a4b7275a61f4fbbf1183b51dde01dad42e"
Sep 29 14:06:20 crc kubenswrapper[4869]: I0929 14:06:20.295780 4869 generic.go:334] "Generic (PLEG): container finished" podID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerID="f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de" exitCode=0
Sep 29 14:06:20 crc kubenswrapper[4869]: I0929 14:06:20.295827 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerDied","Data":"f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de"}
Sep 29 14:06:20 crc kubenswrapper[4869]: I0929 14:06:20.658011 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 14:06:20 crc kubenswrapper[4869]: I0929 14:06:20.658637 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 14:06:21 crc kubenswrapper[4869]: I0929 14:06:21.308727 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerStarted","Data":"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"}
Sep 29 14:06:21 crc kubenswrapper[4869]: I0929 14:06:21.345475 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8x8fn" podStartSLOduration=1.828274398 podStartE2EDuration="4.345400644s" podCreationTimestamp="2025-09-29 14:06:17 +0000 UTC" firstStartedPulling="2025-09-29 14:06:18.268464731 +0000 UTC m=+1504.709109041" lastFinishedPulling="2025-09-29 14:06:20.785590967 +0000 UTC m=+1507.226235287" observedRunningTime="2025-09-29 14:06:21.328461673 +0000 UTC m=+1507.769106013" watchObservedRunningTime="2025-09-29 14:06:21.345400644 +0000 UTC m=+1507.786044964"
Sep 29 14:06:27 crc kubenswrapper[4869]: I0929 14:06:27.446298 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:27 crc kubenswrapper[4869]: I0929 14:06:27.446876 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:27 crc kubenswrapper[4869]: I0929 14:06:27.508052 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:28 crc kubenswrapper[4869]: I0929 14:06:28.414042 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:28 crc kubenswrapper[4869]: I0929 14:06:28.455402 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.389356 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8x8fn" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="registry-server" containerID="cri-o://6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd" gracePeriod=2
Sep 29 14:06:30 crc kubenswrapper[4869]: E0929 14:06:30.610236 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66d47fe6_cf7a_4505_ae63_8790e33de8f4.slice/crio-conmon-6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66d47fe6_cf7a_4505_ae63_8790e33de8f4.slice/crio-6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd.scope\": RecentStats: unable to find data in memory cache]"
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.839999 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.996931 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content\") pod \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") "
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.997449 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities\") pod \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") "
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.997506 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vgs7\" (UniqueName: \"kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7\") pod \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\" (UID: \"66d47fe6-cf7a-4505-ae63-8790e33de8f4\") "
Sep 29 14:06:30 crc kubenswrapper[4869]: I0929 14:06:30.998404 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities" (OuterVolumeSpecName: "utilities") pod "66d47fe6-cf7a-4505-ae63-8790e33de8f4" (UID: "66d47fe6-cf7a-4505-ae63-8790e33de8f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.003739 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7" (OuterVolumeSpecName: "kube-api-access-4vgs7") pod "66d47fe6-cf7a-4505-ae63-8790e33de8f4" (UID: "66d47fe6-cf7a-4505-ae63-8790e33de8f4"). InnerVolumeSpecName "kube-api-access-4vgs7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.012389 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66d47fe6-cf7a-4505-ae63-8790e33de8f4" (UID: "66d47fe6-cf7a-4505-ae63-8790e33de8f4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.099706 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.099737 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vgs7\" (UniqueName: \"kubernetes.io/projected/66d47fe6-cf7a-4505-ae63-8790e33de8f4-kube-api-access-4vgs7\") on node \"crc\" DevicePath \"\""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.099746 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66d47fe6-cf7a-4505-ae63-8790e33de8f4-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.399284 4869 generic.go:334] "Generic (PLEG): container finished" podID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerID="6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd" exitCode=0
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.399334 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerDied","Data":"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"}
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.399354 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8x8fn"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.399385 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8x8fn" event={"ID":"66d47fe6-cf7a-4505-ae63-8790e33de8f4","Type":"ContainerDied","Data":"2720bb677f1787cb08b4d52628fcb69e7303846095159f389b38e4b5c2869c87"}
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.399407 4869 scope.go:117] "RemoveContainer" containerID="6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.437027 4869 scope.go:117] "RemoveContainer" containerID="f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.445826 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.458221 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8x8fn"]
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.461269 4869 scope.go:117] "RemoveContainer" containerID="5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.506927 4869 scope.go:117] "RemoveContainer" containerID="6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"
Sep 29 14:06:31 crc kubenswrapper[4869]: E0929 14:06:31.507360 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd\": container with ID starting with 6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd not found: ID does not exist" containerID="6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.507411 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd"} err="failed to get container status \"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd\": rpc error: code = NotFound desc = could not find container \"6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd\": container with ID starting with 6d30682440ba313dd9f6fadeab5f147c62687eead3af5fba4dcd37d3887a36bd not found: ID does not exist"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.507445 4869 scope.go:117] "RemoveContainer" containerID="f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de"
Sep 29 14:06:31 crc kubenswrapper[4869]: E0929 14:06:31.507767 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de\": container with ID starting with f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de not found: ID does not exist" containerID="f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.507796 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de"} err="failed to get container status \"f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de\": rpc error: code = NotFound desc = could not find container \"f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de\": container with ID starting with f962b4402a12053111cf5ac551559413e38e1c91a8c0e9c55710cb544ba099de not found: ID does not exist"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.507815 4869 scope.go:117] "RemoveContainer" containerID="5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95"
Sep 29 14:06:31 crc kubenswrapper[4869]: E0929 14:06:31.508037 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95\": container with ID starting with 5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95 not found: ID does not exist" containerID="5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95"
Sep 29 14:06:31 crc kubenswrapper[4869]: I0929 14:06:31.508067 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95"} err="failed to get container status \"5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95\": rpc error: code = NotFound desc = could not find container \"5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95\": container with ID starting with 5bba48d8d1fdd6f6eca260f84876b306b4a4fefa50eb145d2b078fe32a1c2d95 not found: ID does not exist"
Sep 29 14:06:32 crc kubenswrapper[4869]: I0929 14:06:32.256231 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" path="/var/lib/kubelet/pods/66d47fe6-cf7a-4505-ae63-8790e33de8f4/volumes"
Sep 29 14:06:50 crc kubenswrapper[4869]: I0929 14:06:50.656876 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 14:06:50 crc kubenswrapper[4869]: I0929 14:06:50.657514 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 14:07:19 crc kubenswrapper[4869]: I0929 14:07:19.978298 4869 scope.go:117] "RemoveContainer" containerID="4e9ab8c5cada21407e25ac2a575ceb4deaa58a7801d53893463db62fdcda333d"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.011016 4869 scope.go:117] "RemoveContainer" containerID="7e72ca549713c7e7b27dbae5b9ee4dbacfdd9679c375b30ef619e62893dc6cf6"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.657376 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.657464 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.657528 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.659151 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.659268 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" gracePeriod=600
Sep 29 14:07:20 crc kubenswrapper[4869]: E0929 14:07:20.780059 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.853779 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" exitCode=0
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.853826 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"}
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.853863 4869 scope.go:117] "RemoveContainer" containerID="9bb29cba4ec61eaadf6d94eecf52b0683364614417f5603e9f08f9d5b6ae2413"
Sep 29 14:07:20 crc kubenswrapper[4869]: I0929 14:07:20.854487 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:07:20 crc kubenswrapper[4869]: E0929 14:07:20.854789 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:07:36 crc kubenswrapper[4869]: I0929 14:07:36.242251 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:07:36 crc kubenswrapper[4869]: E0929 14:07:36.244011 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:07:47 crc kubenswrapper[4869]: I0929 14:07:47.241772 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:07:47 crc kubenswrapper[4869]: E0929 14:07:47.242526 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:07:58 crc kubenswrapper[4869]: I0929 14:07:58.242077 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:07:58 crc kubenswrapper[4869]: E0929 14:07:58.242865 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:08:11 crc kubenswrapper[4869]: I0929 14:08:11.242935 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:08:11 crc kubenswrapper[4869]: E0929 14:08:11.244003 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:08:20 crc kubenswrapper[4869]: I0929 14:08:20.103256 4869 scope.go:117] "RemoveContainer" containerID="103bd3db004da711da2a6438d65b8ff04c86dbd29830163b8ebe45f17d10379e"
Sep 29 14:08:20 crc kubenswrapper[4869]: I0929 14:08:20.135675 4869 scope.go:117] "RemoveContainer" containerID="7ccbc43e343a590326da5e130378854cfd9eb2dc824169fc4c6aed4491d28681"
Sep 29 14:08:26 crc kubenswrapper[4869]: I0929 14:08:26.242029 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:08:26 crc kubenswrapper[4869]: E0929 14:08:26.242818 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:08:38 crc kubenswrapper[4869]: I0929 14:08:38.242040 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:08:38 crc kubenswrapper[4869]: E0929 14:08:38.242909 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:08:52 crc kubenswrapper[4869]: I0929 14:08:52.242292 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:08:52 crc kubenswrapper[4869]: E0929 14:08:52.243171 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:08:57 crc kubenswrapper[4869]: I0929 14:08:57.777928 4869 generic.go:334] "Generic (PLEG): container finished" podID="e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" containerID="36b7d9d5a9700938d1563870424ec500d98cf311151a5d884b3e2c521ce828c4" exitCode=0
Sep 29 14:08:57 crc kubenswrapper[4869]: I0929 14:08:57.778011 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" event={"ID":"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9","Type":"ContainerDied","Data":"36b7d9d5a9700938d1563870424ec500d98cf311151a5d884b3e2c521ce828c4"}
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.171221 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.268835 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key\") pod \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") "
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.268952 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle\") pod \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") "
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.269030 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cnsr\" (UniqueName: \"kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr\") pod \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") "
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.269197 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory\") pod \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\" (UID: \"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9\") "
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.285859 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" (UID: "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.294835 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr" (OuterVolumeSpecName: "kube-api-access-8cnsr") pod "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" (UID: "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9"). InnerVolumeSpecName "kube-api-access-8cnsr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.332957 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory" (OuterVolumeSpecName: "inventory") pod "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" (UID: "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.350988 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" (UID: "e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.372233 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cnsr\" (UniqueName: \"kubernetes.io/projected/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-kube-api-access-8cnsr\") on node \"crc\" DevicePath \"\""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.372282 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-inventory\") on node \"crc\" DevicePath \"\""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.372293 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-ssh-key\") on node \"crc\" DevicePath \"\""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.372304 4869 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.796002 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd" event={"ID":"e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9","Type":"ContainerDied","Data":"bfb518bf58d12aa109b33c522c37266215e9c206ea1ecd849ffa1e51430cd289"}
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.796334 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfb518bf58d12aa109b33c522c37266215e9c206ea1ecd849ffa1e51430cd289"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.796061 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.880572 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"]
Sep 29 14:08:59 crc kubenswrapper[4869]: E0929 14:08:59.880997 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="extract-utilities"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881015 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="extract-utilities"
Sep 29 14:08:59 crc kubenswrapper[4869]: E0929 14:08:59.881045 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881059 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Sep 29 14:08:59 crc kubenswrapper[4869]: E0929 14:08:59.881082 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="extract-content"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881089 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="extract-content"
Sep 29 14:08:59 crc kubenswrapper[4869]: E0929 14:08:59.881104 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="registry-server"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881112 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="registry-server"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881285 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881323 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d47fe6-cf7a-4505-ae63-8790e33de8f4" containerName="registry-server"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.881959 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.884192 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.884241 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.884603 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.884753 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.898147 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"]
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.983586 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tx6p\" (UniqueName: \"kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.983910 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:08:59 crc kubenswrapper[4869]: I0929 14:08:59.984294 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.085590 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.085721 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tx6p\" (UniqueName: \"kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.085779 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.091881 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.091985 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.106800 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tx6p\" (UniqueName: \"kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.203084 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.705366 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"]
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.712457 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Sep 29 14:09:00 crc kubenswrapper[4869]: I0929 14:09:00.804800 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" event={"ID":"9449b5c9-7a8b-44ab-86d4-17b6c4dca520","Type":"ContainerStarted","Data":"8328f51f37cd9ee2721eecf9244f1adfa4c3399fe4bd6e7a5c312862ee427b39"}
Sep 29 14:09:01 crc kubenswrapper[4869]: I0929 14:09:01.815864 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" event={"ID":"9449b5c9-7a8b-44ab-86d4-17b6c4dca520","Type":"ContainerStarted","Data":"7174425c78c9158e1b2794ea7445f3d3c0105b823ebba099135cf89c13d5101d"}
Sep 29 14:09:01 crc kubenswrapper[4869]: I0929 14:09:01.834208 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" podStartSLOduration=2.316802495 podStartE2EDuration="2.834189728s" podCreationTimestamp="2025-09-29 14:08:59 +0000 UTC" firstStartedPulling="2025-09-29 14:09:00.712243736 +0000 UTC m=+1667.152888056" lastFinishedPulling="2025-09-29 14:09:01.229630969 +0000 UTC m=+1667.670275289" observedRunningTime="2025-09-29 14:09:01.832459653 +0000 UTC m=+1668.273103973" watchObservedRunningTime="2025-09-29 14:09:01.834189728 +0000 UTC m=+1668.274834048"
Sep 29 14:09:04 crc kubenswrapper[4869]: I0929 14:09:04.248749 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:09:04 crc kubenswrapper[4869]: E0929 14:09:04.249286 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:09:18 crc kubenswrapper[4869]: I0929 14:09:18.242509 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:09:18 crc kubenswrapper[4869]: E0929 14:09:18.243383 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:09:20 crc kubenswrapper[4869]: I0929 14:09:20.204000 4869 scope.go:117] "RemoveContainer" containerID="f9e467293f90f7a8bf950df6f03ce7a89ee70b8858330a15f3e666a69f78e93d"
Sep 29 14:09:20 crc kubenswrapper[4869]: I0929 14:09:20.236296 4869 scope.go:117] "RemoveContainer" containerID="c84ece203afed896ae05e8d5449ce5169a27425e79c5eb4cffb5d0be3185b06d"
Sep 29 14:09:20 crc kubenswrapper[4869]: I0929 14:09:20.260470 4869 scope.go:117] "RemoveContainer" containerID="173bb7f07d253fd3651bbb0a94ba773813e9f2459e632842c4eda9b6c9e98cbb"
Sep 29 14:09:20 crc kubenswrapper[4869]: I0929 14:09:20.299557 4869 scope.go:117] "RemoveContainer" containerID="20bb7d3bc8e87e2f7be0f31ddc6fe5c244a1b0fe7bda35abbdf4710bd3dd7260"
Sep 29 14:09:29 crc kubenswrapper[4869]: I0929 14:09:29.242275 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:09:29 crc kubenswrapper[4869]: E0929 14:09:29.243023 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:09:42 crc kubenswrapper[4869]: I0929 14:09:42.242299 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:09:42 crc kubenswrapper[4869]: E0929 14:09:42.243151 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:09:56 crc kubenswrapper[4869]: I0929 14:09:56.038976 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-5frp9"]
Sep 29 14:09:56 crc kubenswrapper[4869]: I0929 14:09:56.054591 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-5frp9"]
Sep 29 14:09:56 crc kubenswrapper[4869]: I0929 14:09:56.252109 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="131d94d1-1122-44ab-99b9-ca4b0801beac" path="/var/lib/kubelet/pods/131d94d1-1122-44ab-99b9-ca4b0801beac/volumes"
Sep 29 14:09:57 crc kubenswrapper[4869]: I0929 14:09:57.242080 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:09:57 crc kubenswrapper[4869]: E0929 14:09:57.242524 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:10:00 crc kubenswrapper[4869]: I0929 14:10:00.027011 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-zmpk5"]
Sep 29 14:10:00 crc kubenswrapper[4869]: I0929 14:10:00.038271 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-zmpk5"]
Sep 29 14:10:00 crc kubenswrapper[4869]: I0929 14:10:00.256089 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e8902bc-102d-40ac-b875-891302ebd0fb" path="/var/lib/kubelet/pods/8e8902bc-102d-40ac-b875-891302ebd0fb/volumes"
Sep 29 14:10:03 crc kubenswrapper[4869]: I0929 14:10:03.029903 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-md6jn"]
Sep 29 14:10:03 crc kubenswrapper[4869]: I0929 14:10:03.056417 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-9jmg5"]
Sep 29 14:10:03 crc kubenswrapper[4869]: I0929 14:10:03.065303 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-9jmg5"]
Sep 29 14:10:03 crc kubenswrapper[4869]: I0929 14:10:03.073733 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-md6jn"]
Sep 29 14:10:04 crc kubenswrapper[4869]: I0929 14:10:04.252950 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="497f91e5-218a-4f87-b810-0fc7d7a98a71" path="/var/lib/kubelet/pods/497f91e5-218a-4f87-b810-0fc7d7a98a71/volumes"
Sep 29 14:10:04 crc kubenswrapper[4869]: I0929 14:10:04.253663 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89f8ac17-8c0d-475c-85a1-93847e5b0d8a" path="/var/lib/kubelet/pods/89f8ac17-8c0d-475c-85a1-93847e5b0d8a/volumes"
Sep 29 14:10:05 crc kubenswrapper[4869]: I0929 14:10:05.029170 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-ab6b-account-create-m26p6"]
Sep 29 14:10:05 crc kubenswrapper[4869]: I0929 14:10:05.040530 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-ab6b-account-create-m26p6"]
Sep 29 14:10:06 crc kubenswrapper[4869]: I0929 14:10:06.262176 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="368fafef-d0b7-42b8-b2fa-73310a8111fc" path="/var/lib/kubelet/pods/368fafef-d0b7-42b8-b2fa-73310a8111fc/volumes"
Sep 29 14:10:09 crc kubenswrapper[4869]: I0929 14:10:09.242317 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac"
Sep 29 14:10:09 crc kubenswrapper[4869]: E0929 14:10:09.244247 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:10:10 crc kubenswrapper[4869]: I0929 14:10:10.048390 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-a035-account-create-kmx6v"]
Sep 29 14:10:10 crc kubenswrapper[4869]: I0929 14:10:10.060714 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-a035-account-create-kmx6v"]
Sep 29 14:10:10 crc kubenswrapper[4869]: I0929 14:10:10.258362 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e" path="/var/lib/kubelet/pods/6c7a9bcc-8cb3-4821-a3fe-a62ad683d81e/volumes"
Sep 29 14:10:15 crc kubenswrapper[4869]: I0929 14:10:15.023272 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-3528-account-create-kg89s"]
Sep 29 14:10:15 crc kubenswrapper[4869]: I0929 14:10:15.030945 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f174-account-create-2x62k"]
Sep 29 14:10:15 crc kubenswrapper[4869]: I0929 14:10:15.038554 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f174-account-create-2x62k"]
Sep 29 14:10:15 crc kubenswrapper[4869]: I0929 14:10:15.045899 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-3528-account-create-kg89s"]
Sep 29 14:10:16 crc kubenswrapper[4869]: I0929 14:10:16.255540 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e851177-0022-4915-ace1-6586541e545b" path="/var/lib/kubelet/pods/9e851177-0022-4915-ace1-6586541e545b/volumes"
Sep 29 14:10:16 crc kubenswrapper[4869]: I0929 14:10:16.258101 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f96c0f0-dea8-42df-aff1-0470cbeb03d2" path="/var/lib/kubelet/pods/9f96c0f0-dea8-42df-aff1-0470cbeb03d2/volumes"
Sep 29 14:10:16 crc kubenswrapper[4869]: I0929 14:10:16.519519 4869 generic.go:334] "Generic (PLEG): container finished" podID="9449b5c9-7a8b-44ab-86d4-17b6c4dca520" containerID="7174425c78c9158e1b2794ea7445f3d3c0105b823ebba099135cf89c13d5101d" exitCode=0
Sep 29 14:10:16 crc kubenswrapper[4869]: I0929 14:10:16.519588 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" event={"ID":"9449b5c9-7a8b-44ab-86d4-17b6c4dca520","Type":"ContainerDied","Data":"7174425c78c9158e1b2794ea7445f3d3c0105b823ebba099135cf89c13d5101d"}
Sep 29 14:10:17 crc kubenswrapper[4869]: I0929 14:10:17.917559 4869 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.043192 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key\") pod \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.043443 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tx6p\" (UniqueName: \"kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p\") pod \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.043640 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory\") pod \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\" (UID: \"9449b5c9-7a8b-44ab-86d4-17b6c4dca520\") " Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.050690 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p" (OuterVolumeSpecName: "kube-api-access-6tx6p") pod "9449b5c9-7a8b-44ab-86d4-17b6c4dca520" (UID: "9449b5c9-7a8b-44ab-86d4-17b6c4dca520"). InnerVolumeSpecName "kube-api-access-6tx6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.071421 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9449b5c9-7a8b-44ab-86d4-17b6c4dca520" (UID: "9449b5c9-7a8b-44ab-86d4-17b6c4dca520"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.071436 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory" (OuterVolumeSpecName: "inventory") pod "9449b5c9-7a8b-44ab-86d4-17b6c4dca520" (UID: "9449b5c9-7a8b-44ab-86d4-17b6c4dca520"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.146974 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tx6p\" (UniqueName: \"kubernetes.io/projected/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-kube-api-access-6tx6p\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.147006 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.147016 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9449b5c9-7a8b-44ab-86d4-17b6c4dca520-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.541600 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" event={"ID":"9449b5c9-7a8b-44ab-86d4-17b6c4dca520","Type":"ContainerDied","Data":"8328f51f37cd9ee2721eecf9244f1adfa4c3399fe4bd6e7a5c312862ee427b39"} Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.541679 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8328f51f37cd9ee2721eecf9244f1adfa4c3399fe4bd6e7a5c312862ee427b39" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.541697 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.623075 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz"] Sep 29 14:10:18 crc kubenswrapper[4869]: E0929 14:10:18.623712 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9449b5c9-7a8b-44ab-86d4-17b6c4dca520" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.623835 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9449b5c9-7a8b-44ab-86d4-17b6c4dca520" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.624176 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9449b5c9-7a8b-44ab-86d4-17b6c4dca520" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.625024 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.629174 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.629537 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.629894 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.630173 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.633165 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz"] Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.759268 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.759899 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrlzw\" (UniqueName: \"kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.760086 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.861277 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.862462 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.862712 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrlzw\" (UniqueName: \"kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.867369 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.869152 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.879974 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrlzw\" (UniqueName: \"kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:18 crc kubenswrapper[4869]: I0929 14:10:18.955516 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:19 crc kubenswrapper[4869]: I0929 14:10:19.459221 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz"] Sep 29 14:10:19 crc kubenswrapper[4869]: I0929 14:10:19.553646 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" event={"ID":"6aa4be87-45c9-4708-823b-13d5786c6046","Type":"ContainerStarted","Data":"a9b2e86836d609d90121a630cff92458949e4bd121c5a09e307a5fca4642c31c"} Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.361922 4869 scope.go:117] "RemoveContainer" containerID="1fbbcb9dfe347b34253e56ee4a06027b0b8b34b909a9c0562503725e1b2b608d" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.386157 4869 scope.go:117] "RemoveContainer" containerID="ebbe9aee078ad6abbbd5bfcb85e663b5e65f7015187c693376a07430219644ab" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.426641 4869 scope.go:117] "RemoveContainer" containerID="4e773771b262fcd97e352a7b8fcf727f2b9279a0f1e887482ca2da633956d9d9" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.472748 4869 scope.go:117] "RemoveContainer" containerID="7caa789610a9eb785e8d8f80106a813130a414966c9fc65155c8e879dbd0e3e3" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.493294 4869 scope.go:117] "RemoveContainer" containerID="7cf09f77c3676376915d5fd1e54227c0c7a8297640d9eafe683bf1f26e1f8cb4" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.512214 4869 scope.go:117] "RemoveContainer" containerID="7e6e4854ba5509011e9b70e9f6aed6282c1107094be529bbd49099bbbfb6ee1f" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.529606 4869 scope.go:117] "RemoveContainer" containerID="417d186a4bd3d5a12a53100d4948fb7373b2fe46717fa43e2d962b3a13b46d83" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 
14:10:20.548372 4869 scope.go:117] "RemoveContainer" containerID="94bc501c9d814f7e40f5ac59bc88ab0f4271ac3044458024f7b1f159f0b7e03e" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.567177 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" event={"ID":"6aa4be87-45c9-4708-823b-13d5786c6046","Type":"ContainerStarted","Data":"783fd8a8b004bf601aa3ffc731620391a0b288e90ba385a622bc469bcc4f4f91"} Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.574310 4869 scope.go:117] "RemoveContainer" containerID="e52c5904102c730bd64678196c012a0ccc3efe81ebd8f97a0c229d6ed0ea9ef2" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.593905 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" podStartSLOduration=2.130403475 podStartE2EDuration="2.59388027s" podCreationTimestamp="2025-09-29 14:10:18 +0000 UTC" firstStartedPulling="2025-09-29 14:10:19.459595391 +0000 UTC m=+1745.900239701" lastFinishedPulling="2025-09-29 14:10:19.923072176 +0000 UTC m=+1746.363716496" observedRunningTime="2025-09-29 14:10:20.582907346 +0000 UTC m=+1747.023551686" watchObservedRunningTime="2025-09-29 14:10:20.59388027 +0000 UTC m=+1747.034524620" Sep 29 14:10:20 crc kubenswrapper[4869]: I0929 14:10:20.601041 4869 scope.go:117] "RemoveContainer" containerID="cf0642da47c5e2e1dd99bb8506e41f5e6c985150e2038a1fe585d4dc487d4f22" Sep 29 14:10:22 crc kubenswrapper[4869]: I0929 14:10:22.242731 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:10:22 crc kubenswrapper[4869]: E0929 14:10:22.243847 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:10:25 crc kubenswrapper[4869]: I0929 14:10:25.636575 4869 generic.go:334] "Generic (PLEG): container finished" podID="6aa4be87-45c9-4708-823b-13d5786c6046" containerID="783fd8a8b004bf601aa3ffc731620391a0b288e90ba385a622bc469bcc4f4f91" exitCode=0 Sep 29 14:10:25 crc kubenswrapper[4869]: I0929 14:10:25.636636 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" event={"ID":"6aa4be87-45c9-4708-823b-13d5786c6046","Type":"ContainerDied","Data":"783fd8a8b004bf601aa3ffc731620391a0b288e90ba385a622bc469bcc4f4f91"} Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.033225 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.135864 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory\") pod \"6aa4be87-45c9-4708-823b-13d5786c6046\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.136022 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrlzw\" (UniqueName: \"kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw\") pod \"6aa4be87-45c9-4708-823b-13d5786c6046\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.136309 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key\") pod \"6aa4be87-45c9-4708-823b-13d5786c6046\" (UID: \"6aa4be87-45c9-4708-823b-13d5786c6046\") " Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.143697 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw" (OuterVolumeSpecName: "kube-api-access-vrlzw") pod "6aa4be87-45c9-4708-823b-13d5786c6046" (UID: "6aa4be87-45c9-4708-823b-13d5786c6046"). InnerVolumeSpecName "kube-api-access-vrlzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.165760 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6aa4be87-45c9-4708-823b-13d5786c6046" (UID: "6aa4be87-45c9-4708-823b-13d5786c6046"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.166632 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory" (OuterVolumeSpecName: "inventory") pod "6aa4be87-45c9-4708-823b-13d5786c6046" (UID: "6aa4be87-45c9-4708-823b-13d5786c6046"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.238601 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.238648 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrlzw\" (UniqueName: \"kubernetes.io/projected/6aa4be87-45c9-4708-823b-13d5786c6046-kube-api-access-vrlzw\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.238658 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6aa4be87-45c9-4708-823b-13d5786c6046-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.657789 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" event={"ID":"6aa4be87-45c9-4708-823b-13d5786c6046","Type":"ContainerDied","Data":"a9b2e86836d609d90121a630cff92458949e4bd121c5a09e307a5fca4642c31c"} Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.657840 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9b2e86836d609d90121a630cff92458949e4bd121c5a09e307a5fca4642c31c" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.657849 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.727651 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7"] Sep 29 14:10:27 crc kubenswrapper[4869]: E0929 14:10:27.728184 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aa4be87-45c9-4708-823b-13d5786c6046" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.728201 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aa4be87-45c9-4708-823b-13d5786c6046" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.728386 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aa4be87-45c9-4708-823b-13d5786c6046" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.729173 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.732495 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.732510 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.732844 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.732977 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.736267 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7"] Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.850860 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.850913 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.851373 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ffzm\" (UniqueName: \"kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.954101 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ffzm\" (UniqueName: \"kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.954256 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.954279 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: 
\"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.963114 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.963247 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:27 crc kubenswrapper[4869]: I0929 14:10:27.978875 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ffzm\" (UniqueName: \"kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8tpx7\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:28 crc kubenswrapper[4869]: I0929 14:10:28.047822 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:10:28 crc kubenswrapper[4869]: I0929 14:10:28.574807 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7"] Sep 29 14:10:28 crc kubenswrapper[4869]: I0929 14:10:28.668786 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" event={"ID":"19478488-9f59-4a64-b3f9-184f6a259d06","Type":"ContainerStarted","Data":"9d2654c7b2849184a34183fcf3e4b55dee1e9bdda02a993fab77bdf75270c366"} Sep 29 14:10:29 crc kubenswrapper[4869]: I0929 14:10:29.684088 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" event={"ID":"19478488-9f59-4a64-b3f9-184f6a259d06","Type":"ContainerStarted","Data":"c13d29d7e28cf7d5c049adc48754f3e5ec76c5a2d71f022e04ccf29a61bd2043"} Sep 29 14:10:29 crc kubenswrapper[4869]: I0929 14:10:29.711349 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" podStartSLOduration=2.165880312 podStartE2EDuration="2.711324026s" podCreationTimestamp="2025-09-29 14:10:27 +0000 UTC" firstStartedPulling="2025-09-29 14:10:28.590119374 +0000 UTC m=+1755.030763694" lastFinishedPulling="2025-09-29 14:10:29.135563088 +0000 UTC m=+1755.576207408" observedRunningTime="2025-09-29 14:10:29.704785957 +0000 UTC m=+1756.145430277" watchObservedRunningTime="2025-09-29 14:10:29.711324026 +0000 UTC m=+1756.151968346" Sep 29 14:10:34 crc kubenswrapper[4869]: I0929 14:10:34.059291 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-f2wnx"] Sep 29 14:10:34 crc kubenswrapper[4869]: I0929 14:10:34.071555 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-f2wnx"] Sep 29 14:10:34 crc kubenswrapper[4869]: I0929 14:10:34.254847 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="c9ee6e01-c73e-499a-b626-ac73939d9af3" path="/var/lib/kubelet/pods/c9ee6e01-c73e-499a-b626-ac73939d9af3/volumes" Sep 29 14:10:36 crc kubenswrapper[4869]: I0929 14:10:36.241445 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:10:36 crc kubenswrapper[4869]: E0929 14:10:36.241943 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:10:37 crc kubenswrapper[4869]: I0929 14:10:37.028172 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-52dht"] Sep 29 14:10:37 crc kubenswrapper[4869]: I0929 14:10:37.035743 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-gn5ht"] Sep 29 14:10:37 crc kubenswrapper[4869]: I0929 14:10:37.043388 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-52dht"] Sep 29 14:10:37 crc kubenswrapper[4869]: I0929 14:10:37.051280 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-gn5ht"] Sep 29 14:10:38 crc kubenswrapper[4869]: I0929 14:10:38.252261 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de807e17-2ffa-4679-9156-1905ebf6c3e6" path="/var/lib/kubelet/pods/de807e17-2ffa-4679-9156-1905ebf6c3e6/volumes" Sep 29 14:10:38 crc kubenswrapper[4869]: I0929 14:10:38.253151 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df9ac455-60af-4f59-9eb1-8dcb8569a21c" path="/var/lib/kubelet/pods/df9ac455-60af-4f59-9eb1-8dcb8569a21c/volumes" Sep 29 14:10:42 crc kubenswrapper[4869]: I0929 14:10:42.047748 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-npjf6"] Sep 29 14:10:42 crc kubenswrapper[4869]: I0929 14:10:42.055172 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-npjf6"] Sep 29 14:10:42 crc kubenswrapper[4869]: I0929 14:10:42.251685 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62ecf715-d247-4d7d-baa0-2c929a73a141" path="/var/lib/kubelet/pods/62ecf715-d247-4d7d-baa0-2c929a73a141/volumes" Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.034523 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-qghzx"] Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.045220 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-hdwlg"] Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.052377 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-hdwlg"] Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.059412 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-qghzx"] Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.251133 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf1f0c5e-7cbc-4d6f-afc4-241de854da60" path="/var/lib/kubelet/pods/bf1f0c5e-7cbc-4d6f-afc4-241de854da60/volumes" Sep 29 14:10:48 crc kubenswrapper[4869]: I0929 14:10:48.251707 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dae249ed-58ac-41dc-b84c-59ba9f80d003" 
path="/var/lib/kubelet/pods/dae249ed-58ac-41dc-b84c-59ba9f80d003/volumes" Sep 29 14:10:51 crc kubenswrapper[4869]: I0929 14:10:51.241893 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:10:51 crc kubenswrapper[4869]: E0929 14:10:51.242553 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.028168 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-1f59-account-create-jczsh"] Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.035841 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b9f8-account-create-lzj29"] Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.043520 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-1f59-account-create-jczsh"] Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.050385 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b9f8-account-create-lzj29"] Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.255198 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38c7f4e3-4845-42a1-8496-18a55de398e6" path="/var/lib/kubelet/pods/38c7f4e3-4845-42a1-8496-18a55de398e6/volumes" Sep 29 14:10:52 crc kubenswrapper[4869]: I0929 14:10:52.256140 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6da504f-b624-44ac-8baa-8e144014ecea" path="/var/lib/kubelet/pods/d6da504f-b624-44ac-8baa-8e144014ecea/volumes" Sep 29 14:10:53 crc kubenswrapper[4869]: I0929 14:10:53.031859 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-de12-account-create-xmpfv"] Sep 29 14:10:53 crc kubenswrapper[4869]: I0929 14:10:53.040646 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-de12-account-create-xmpfv"] Sep 29 14:10:54 crc kubenswrapper[4869]: I0929 14:10:54.252086 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc1ab68-07de-40a8-bae9-49548389842e" path="/var/lib/kubelet/pods/8bc1ab68-07de-40a8-bae9-49548389842e/volumes" Sep 29 14:11:05 crc kubenswrapper[4869]: I0929 14:11:05.241864 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:11:05 crc kubenswrapper[4869]: E0929 14:11:05.242587 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:11:09 crc kubenswrapper[4869]: I0929 14:11:09.082756 4869 generic.go:334] "Generic (PLEG): container finished" podID="19478488-9f59-4a64-b3f9-184f6a259d06" containerID="c13d29d7e28cf7d5c049adc48754f3e5ec76c5a2d71f022e04ccf29a61bd2043" exitCode=0 Sep 29 14:11:09 crc kubenswrapper[4869]: I0929 14:11:09.083333 4869 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" event={"ID":"19478488-9f59-4a64-b3f9-184f6a259d06","Type":"ContainerDied","Data":"c13d29d7e28cf7d5c049adc48754f3e5ec76c5a2d71f022e04ccf29a61bd2043"} Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.499103 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.596258 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory\") pod \"19478488-9f59-4a64-b3f9-184f6a259d06\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.596341 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ffzm\" (UniqueName: \"kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm\") pod \"19478488-9f59-4a64-b3f9-184f6a259d06\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.596591 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key\") pod \"19478488-9f59-4a64-b3f9-184f6a259d06\" (UID: \"19478488-9f59-4a64-b3f9-184f6a259d06\") " Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.601472 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm" (OuterVolumeSpecName: "kube-api-access-8ffzm") pod "19478488-9f59-4a64-b3f9-184f6a259d06" (UID: "19478488-9f59-4a64-b3f9-184f6a259d06"). InnerVolumeSpecName "kube-api-access-8ffzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.665134 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory" (OuterVolumeSpecName: "inventory") pod "19478488-9f59-4a64-b3f9-184f6a259d06" (UID: "19478488-9f59-4a64-b3f9-184f6a259d06"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.680093 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "19478488-9f59-4a64-b3f9-184f6a259d06" (UID: "19478488-9f59-4a64-b3f9-184f6a259d06"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.700420 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.700466 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ffzm\" (UniqueName: \"kubernetes.io/projected/19478488-9f59-4a64-b3f9-184f6a259d06-kube-api-access-8ffzm\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:10 crc kubenswrapper[4869]: I0929 14:11:10.700482 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19478488-9f59-4a64-b3f9-184f6a259d06-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.104475 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" event={"ID":"19478488-9f59-4a64-b3f9-184f6a259d06","Type":"ContainerDied","Data":"9d2654c7b2849184a34183fcf3e4b55dee1e9bdda02a993fab77bdf75270c366"} Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.104519 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d2654c7b2849184a34183fcf3e4b55dee1e9bdda02a993fab77bdf75270c366" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.104560 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.177419 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2"] Sep 29 14:11:11 crc kubenswrapper[4869]: E0929 14:11:11.177854 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19478488-9f59-4a64-b3f9-184f6a259d06" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.177875 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="19478488-9f59-4a64-b3f9-184f6a259d06" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.178047 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="19478488-9f59-4a64-b3f9-184f6a259d06" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.178918 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.181265 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.181883 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.182055 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.182072 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.189754 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2"] Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.312567 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw592\" (UniqueName: \"kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.312769 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.312980 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.415008 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.415126 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.415183 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw592\" (UniqueName: \"kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" 
(UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.420247 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.421462 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.433903 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vw592\" (UniqueName: \"kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:11 crc kubenswrapper[4869]: I0929 14:11:11.494635 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:12 crc kubenswrapper[4869]: I0929 14:11:12.005595 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2"] Sep 29 14:11:12 crc kubenswrapper[4869]: I0929 14:11:12.122828 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" event={"ID":"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8","Type":"ContainerStarted","Data":"c51864b5fb75e34e10becc3c2325efa8ae57488cc1e21d755c6f9714dfff3c0e"} Sep 29 14:11:13 crc kubenswrapper[4869]: I0929 14:11:13.132569 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" event={"ID":"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8","Type":"ContainerStarted","Data":"39922a592ef8be57b48593cb5e9ad5659c195ac5ef1f90fd43c15c50ce5fc5f6"} Sep 29 14:11:13 crc kubenswrapper[4869]: I0929 14:11:13.148519 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" podStartSLOduration=1.694964556 podStartE2EDuration="2.148503245s" podCreationTimestamp="2025-09-29 14:11:11 +0000 UTC" firstStartedPulling="2025-09-29 14:11:12.012978775 +0000 UTC m=+1798.453623085" lastFinishedPulling="2025-09-29 14:11:12.466517454 +0000 UTC m=+1798.907161774" observedRunningTime="2025-09-29 14:11:13.145725014 +0000 UTC m=+1799.586369354" watchObservedRunningTime="2025-09-29 14:11:13.148503245 +0000 UTC m=+1799.589147565" Sep 29 14:11:16 crc kubenswrapper[4869]: I0929 14:11:16.039020 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vxszp"] Sep 29 14:11:16 crc kubenswrapper[4869]: I0929 14:11:16.049321 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vxszp"] Sep 29 14:11:16 crc kubenswrapper[4869]: I0929 14:11:16.254208 4869 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="2c7daafc-ae3a-4161-8e8e-8e3651a1afcc" path="/var/lib/kubelet/pods/2c7daafc-ae3a-4161-8e8e-8e3651a1afcc/volumes" Sep 29 14:11:17 crc kubenswrapper[4869]: I0929 14:11:17.167711 4869 generic.go:334] "Generic (PLEG): container finished" podID="fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" containerID="39922a592ef8be57b48593cb5e9ad5659c195ac5ef1f90fd43c15c50ce5fc5f6" exitCode=0 Sep 29 14:11:17 crc kubenswrapper[4869]: I0929 14:11:17.167763 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" event={"ID":"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8","Type":"ContainerDied","Data":"39922a592ef8be57b48593cb5e9ad5659c195ac5ef1f90fd43c15c50ce5fc5f6"} Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.027708 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bn5lj"] Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.036729 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bn5lj"] Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.257283 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b77999d8-2492-4cde-8630-d0110a1884fb" path="/var/lib/kubelet/pods/b77999d8-2492-4cde-8630-d0110a1884fb/volumes" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.509857 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.651553 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vw592\" (UniqueName: \"kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592\") pod \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.651887 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key\") pod \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.651911 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory\") pod \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\" (UID: \"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8\") " Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.659885 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592" (OuterVolumeSpecName: "kube-api-access-vw592") pod "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" (UID: "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8"). InnerVolumeSpecName "kube-api-access-vw592". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.680783 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory" (OuterVolumeSpecName: "inventory") pod "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" (UID: "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.681158 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" (UID: "fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.754061 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vw592\" (UniqueName: \"kubernetes.io/projected/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-kube-api-access-vw592\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.754105 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:18 crc kubenswrapper[4869]: I0929 14:11:18.754118 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.190051 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" event={"ID":"fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8","Type":"ContainerDied","Data":"c51864b5fb75e34e10becc3c2325efa8ae57488cc1e21d755c6f9714dfff3c0e"} Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.190098 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c51864b5fb75e34e10becc3c2325efa8ae57488cc1e21d755c6f9714dfff3c0e" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.190103 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.256062 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb"] Sep 29 14:11:19 crc kubenswrapper[4869]: E0929 14:11:19.256463 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.256481 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.256730 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.257366 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.260455 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.261464 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.261707 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.263532 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.297032 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb"] Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.369779 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swd9j\" (UniqueName: \"kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.369963 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.370012 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.472313 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swd9j\" (UniqueName: \"kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.472430 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.472452 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" 
(UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.476047 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.476118 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.490091 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swd9j\" (UniqueName: \"kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:19 crc kubenswrapper[4869]: I0929 14:11:19.580287 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.024529 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-zl26d"] Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.032093 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-zl26d"] Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.069061 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb"] Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.200926 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" event={"ID":"cb4e617b-b904-4bd2-b84b-daf3d4265da1","Type":"ContainerStarted","Data":"b28f277dbfc4e4727fa07dc700f0575905fd810a3daea88bdfd59c9695faa19f"} Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.241797 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:11:20 crc kubenswrapper[4869]: E0929 14:11:20.242049 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.253965 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c" path="/var/lib/kubelet/pods/0608e6b0-5ca1-4a7e-80d5-0286e50dbd1c/volumes" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.773817 4869 scope.go:117] "RemoveContainer" 
containerID="d3ffed8f08c55753d1820f91211c4143f452770337a6ff28eb5eacc04a5db8ea" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.823790 4869 scope.go:117] "RemoveContainer" containerID="28f15fcd29f2e42dc3ace4082b33b77f840aaf12cd9d59c8540efffc609c7c37" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.858844 4869 scope.go:117] "RemoveContainer" containerID="15ac72790e3fa861235f9575970b77d6f7273d7b04834e83edf5a00f5fd9382c" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.894992 4869 scope.go:117] "RemoveContainer" containerID="3202ffa8eca92c287d35f416b8c3855cf995f57086a845695a6ff1526d0177f0" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.943455 4869 scope.go:117] "RemoveContainer" containerID="1b4c042eb5b18c1f24bfde21733be358d8ea0a065d2648a2df936e27551f973b" Sep 29 14:11:20 crc kubenswrapper[4869]: I0929 14:11:20.994592 4869 scope.go:117] "RemoveContainer" containerID="eaf2463bab505fc2a57a29dce4f2cda19bc17e4f1582dae8866b0e748a217ecb" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.067878 4869 scope.go:117] "RemoveContainer" containerID="204b34762bcc1c2859973455721efec89dcf694fc8f5ba4610d6eb234eb0eda0" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.087047 4869 scope.go:117] "RemoveContainer" containerID="b5ea25fc052f09c2b7433621953e6df568bb6a63e11f83d9ff6e0e5305c29467" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.129962 4869 scope.go:117] "RemoveContainer" containerID="0321aafb5f9d3b8ecc8038ebbcd454d68bf605d1d7daed969e718c88ed4f1bc8" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.162859 4869 scope.go:117] "RemoveContainer" containerID="159c01f7798e101c42860c996158544b046ab41145230ab7ed9c0632fab9187d" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.224291 4869 scope.go:117] "RemoveContainer" containerID="894d47b410b578430459cc03e5040ad2a4bd1b4b07eff321296a3170c8de05bc" Sep 29 14:11:21 crc kubenswrapper[4869]: I0929 14:11:21.254234 4869 scope.go:117] "RemoveContainer" containerID="255dd11c37ea2aaf037ec13f7e55e3c9c705fc87b3e9bfe27c738c04fdf17a2d" Sep 29 14:11:22 crc kubenswrapper[4869]: I0929 14:11:22.232449 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" event={"ID":"cb4e617b-b904-4bd2-b84b-daf3d4265da1","Type":"ContainerStarted","Data":"e5c1c31eeb7183ccc2a3e1b302c454abdf07b06cd1de5105eaefab3f0faa175f"} Sep 29 14:11:22 crc kubenswrapper[4869]: I0929 14:11:22.256173 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" podStartSLOduration=1.714561617 podStartE2EDuration="3.256155841s" podCreationTimestamp="2025-09-29 14:11:19 +0000 UTC" firstStartedPulling="2025-09-29 14:11:20.077335181 +0000 UTC m=+1806.517979511" lastFinishedPulling="2025-09-29 14:11:21.618929415 +0000 UTC m=+1808.059573735" observedRunningTime="2025-09-29 14:11:22.25069816 +0000 UTC m=+1808.691342500" watchObservedRunningTime="2025-09-29 14:11:22.256155841 +0000 UTC m=+1808.696800161" Sep 29 14:11:32 crc kubenswrapper[4869]: I0929 14:11:32.242160 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:11:32 crc kubenswrapper[4869]: E0929 14:11:32.242843 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:11:36 crc kubenswrapper[4869]: I0929 14:11:36.035198 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-h5s6m"] Sep 29 14:11:36 crc kubenswrapper[4869]: I0929 14:11:36.044529 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-h5s6m"] Sep 29 14:11:36 crc kubenswrapper[4869]: I0929 14:11:36.263355 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e99dfaa4-d2d7-411a-bea7-4a4768c31ee4" path="/var/lib/kubelet/pods/e99dfaa4-d2d7-411a-bea7-4a4768c31ee4/volumes" Sep 29 14:11:44 crc kubenswrapper[4869]: I0929 14:11:44.247990 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:11:44 crc kubenswrapper[4869]: E0929 14:11:44.248794 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:11:46 crc kubenswrapper[4869]: I0929 14:11:46.040304 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-nmzfx"] Sep 29 14:11:46 crc kubenswrapper[4869]: I0929 14:11:46.047556 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-nmzfx"] Sep 29 14:11:46 crc kubenswrapper[4869]: I0929 14:11:46.255326 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="250a15d6-2b1f-4b59-9564-7c7240c9b84e" path="/var/lib/kubelet/pods/250a15d6-2b1f-4b59-9564-7c7240c9b84e/volumes" Sep 29 14:11:56 crc kubenswrapper[4869]: I0929 14:11:56.242227 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:11:56 crc kubenswrapper[4869]: E0929 14:11:56.242978 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:12:11 crc kubenswrapper[4869]: I0929 14:12:11.242416 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:12:11 crc kubenswrapper[4869]: E0929 14:12:11.245245 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:12:11 crc kubenswrapper[4869]: I0929 14:12:11.680365 4869 generic.go:334] "Generic (PLEG): container finished" podID="cb4e617b-b904-4bd2-b84b-daf3d4265da1" 
containerID="e5c1c31eeb7183ccc2a3e1b302c454abdf07b06cd1de5105eaefab3f0faa175f" exitCode=0 Sep 29 14:12:11 crc kubenswrapper[4869]: I0929 14:12:11.680452 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" event={"ID":"cb4e617b-b904-4bd2-b84b-daf3d4265da1","Type":"ContainerDied","Data":"e5c1c31eeb7183ccc2a3e1b302c454abdf07b06cd1de5105eaefab3f0faa175f"} Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.055047 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.133073 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory\") pod \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.133187 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key\") pod \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.133401 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swd9j\" (UniqueName: \"kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j\") pod \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\" (UID: \"cb4e617b-b904-4bd2-b84b-daf3d4265da1\") " Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.137815 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j" (OuterVolumeSpecName: "kube-api-access-swd9j") pod "cb4e617b-b904-4bd2-b84b-daf3d4265da1" (UID: "cb4e617b-b904-4bd2-b84b-daf3d4265da1"). InnerVolumeSpecName "kube-api-access-swd9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.165194 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cb4e617b-b904-4bd2-b84b-daf3d4265da1" (UID: "cb4e617b-b904-4bd2-b84b-daf3d4265da1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.165632 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory" (OuterVolumeSpecName: "inventory") pod "cb4e617b-b904-4bd2-b84b-daf3d4265da1" (UID: "cb4e617b-b904-4bd2-b84b-daf3d4265da1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.234835 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swd9j\" (UniqueName: \"kubernetes.io/projected/cb4e617b-b904-4bd2-b84b-daf3d4265da1-kube-api-access-swd9j\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.234865 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.234875 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb4e617b-b904-4bd2-b84b-daf3d4265da1-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.700410 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" event={"ID":"cb4e617b-b904-4bd2-b84b-daf3d4265da1","Type":"ContainerDied","Data":"b28f277dbfc4e4727fa07dc700f0575905fd810a3daea88bdfd59c9695faa19f"} Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.700446 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b28f277dbfc4e4727fa07dc700f0575905fd810a3daea88bdfd59c9695faa19f" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.700553 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.781273 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-sghc5"] Sep 29 14:12:13 crc kubenswrapper[4869]: E0929 14:12:13.781714 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb4e617b-b904-4bd2-b84b-daf3d4265da1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.781729 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb4e617b-b904-4bd2-b84b-daf3d4265da1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.781936 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb4e617b-b904-4bd2-b84b-daf3d4265da1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.782642 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.784513 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.785678 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.785775 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.785924 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.789061 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-sghc5"] Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.847176 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6r2b\" (UniqueName: \"kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.847282 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.847349 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.948836 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6r2b\" (UniqueName: \"kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.948918 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.948958 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc 
kubenswrapper[4869]: I0929 14:12:13.952997 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.953178 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:13 crc kubenswrapper[4869]: I0929 14:12:13.965827 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6r2b\" (UniqueName: \"kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b\") pod \"ssh-known-hosts-edpm-deployment-sghc5\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:14 crc kubenswrapper[4869]: I0929 14:12:14.106935 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:14 crc kubenswrapper[4869]: I0929 14:12:14.789965 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-sghc5"] Sep 29 14:12:15 crc kubenswrapper[4869]: I0929 14:12:15.722668 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" event={"ID":"f999ef7e-3672-46d6-b432-10b37fa93176","Type":"ContainerStarted","Data":"31b561b32cc9ad9c708e5b39e25ae4e62999d7175aff1cd7dfd3f0e7a1171146"} Sep 29 14:12:16 crc kubenswrapper[4869]: I0929 14:12:16.120335 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:12:16 crc kubenswrapper[4869]: I0929 14:12:16.733361 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" event={"ID":"f999ef7e-3672-46d6-b432-10b37fa93176","Type":"ContainerStarted","Data":"2d2e8ee1ff2029fa6c26d37397b3f4a17ec1cd344f992cba9612d7371cc25e0d"} Sep 29 14:12:16 crc kubenswrapper[4869]: I0929 14:12:16.756919 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" podStartSLOduration=2.429005732 podStartE2EDuration="3.756901092s" podCreationTimestamp="2025-09-29 14:12:13 +0000 UTC" firstStartedPulling="2025-09-29 14:12:14.789439591 +0000 UTC m=+1861.230083931" lastFinishedPulling="2025-09-29 14:12:16.117334971 +0000 UTC m=+1862.557979291" observedRunningTime="2025-09-29 14:12:16.749146962 +0000 UTC m=+1863.189791322" watchObservedRunningTime="2025-09-29 14:12:16.756901092 +0000 UTC m=+1863.197545412" Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.043658 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-95tdg"] Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.053736 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-lttz8"] Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.064861 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-c4pdp"] Sep 29 14:12:20 crc kubenswrapper[4869]: 
I0929 14:12:20.072293 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-95tdg"] Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.079118 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-c4pdp"] Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.086244 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-lttz8"] Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.277514 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27cd3399-68ec-4d81-8a5c-695980634d8c" path="/var/lib/kubelet/pods/27cd3399-68ec-4d81-8a5c-695980634d8c/volumes" Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.278118 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8adecd5f-ad38-4d6c-a1d7-f382f5357c48" path="/var/lib/kubelet/pods/8adecd5f-ad38-4d6c-a1d7-f382f5357c48/volumes" Sep 29 14:12:20 crc kubenswrapper[4869]: I0929 14:12:20.278585 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b222443-94e1-4451-9fdd-3240cba54a38" path="/var/lib/kubelet/pods/8b222443-94e1-4451-9fdd-3240cba54a38/volumes" Sep 29 14:12:21 crc kubenswrapper[4869]: I0929 14:12:21.801921 4869 scope.go:117] "RemoveContainer" containerID="8a217821e33319e4e8f68d3771142dc639f890cfafa66e7e40b8f7264817b434" Sep 29 14:12:21 crc kubenswrapper[4869]: I0929 14:12:21.829302 4869 scope.go:117] "RemoveContainer" containerID="c1b5c190354c6081c48119f76f3d1a7d5e741635899a3177a3ca14daf830eec3" Sep 29 14:12:21 crc kubenswrapper[4869]: I0929 14:12:21.892302 4869 scope.go:117] "RemoveContainer" containerID="b0d591311cd48a960363e8123c0559e78aaef7c766c29898e1144a1bf4d59f2b" Sep 29 14:12:21 crc kubenswrapper[4869]: I0929 14:12:21.919587 4869 scope.go:117] "RemoveContainer" containerID="d38f66d7729ddafdd54b8caf3d46433c8fe07756d799abde3f03ccb03f489683" Sep 29 14:12:21 crc kubenswrapper[4869]: I0929 14:12:21.963305 4869 scope.go:117] "RemoveContainer" containerID="c84fd0ff80087f86194e3d7952c169c32bbeb8f29e2fbefbaa287783c29b1a76" Sep 29 14:12:23 crc kubenswrapper[4869]: I0929 14:12:23.242089 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:12:23 crc kubenswrapper[4869]: I0929 14:12:23.807946 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f"} Sep 29 14:12:23 crc kubenswrapper[4869]: I0929 14:12:23.809526 4869 generic.go:334] "Generic (PLEG): container finished" podID="f999ef7e-3672-46d6-b432-10b37fa93176" containerID="2d2e8ee1ff2029fa6c26d37397b3f4a17ec1cd344f992cba9612d7371cc25e0d" exitCode=0 Sep 29 14:12:23 crc kubenswrapper[4869]: I0929 14:12:23.809577 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" event={"ID":"f999ef7e-3672-46d6-b432-10b37fa93176","Type":"ContainerDied","Data":"2d2e8ee1ff2029fa6c26d37397b3f4a17ec1cd344f992cba9612d7371cc25e0d"} Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.343845 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.368385 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0\") pod \"f999ef7e-3672-46d6-b432-10b37fa93176\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.368551 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam\") pod \"f999ef7e-3672-46d6-b432-10b37fa93176\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.368590 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6r2b\" (UniqueName: \"kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b\") pod \"f999ef7e-3672-46d6-b432-10b37fa93176\" (UID: \"f999ef7e-3672-46d6-b432-10b37fa93176\") " Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.374421 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b" (OuterVolumeSpecName: "kube-api-access-f6r2b") pod "f999ef7e-3672-46d6-b432-10b37fa93176" (UID: "f999ef7e-3672-46d6-b432-10b37fa93176"). InnerVolumeSpecName "kube-api-access-f6r2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.396165 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "f999ef7e-3672-46d6-b432-10b37fa93176" (UID: "f999ef7e-3672-46d6-b432-10b37fa93176"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.401783 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "f999ef7e-3672-46d6-b432-10b37fa93176" (UID: "f999ef7e-3672-46d6-b432-10b37fa93176"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.471240 4869 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-inventory-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.471527 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f999ef7e-3672-46d6-b432-10b37fa93176-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.471541 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6r2b\" (UniqueName: \"kubernetes.io/projected/f999ef7e-3672-46d6-b432-10b37fa93176-kube-api-access-f6r2b\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.831887 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" event={"ID":"f999ef7e-3672-46d6-b432-10b37fa93176","Type":"ContainerDied","Data":"31b561b32cc9ad9c708e5b39e25ae4e62999d7175aff1cd7dfd3f0e7a1171146"} Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.831932 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31b561b32cc9ad9c708e5b39e25ae4e62999d7175aff1cd7dfd3f0e7a1171146" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.832157 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-sghc5" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.936791 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb"] Sep 29 14:12:25 crc kubenswrapper[4869]: E0929 14:12:25.937272 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f999ef7e-3672-46d6-b432-10b37fa93176" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.937288 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f999ef7e-3672-46d6-b432-10b37fa93176" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.937552 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f999ef7e-3672-46d6-b432-10b37fa93176" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.938358 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.941601 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.941842 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.941962 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.942067 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:12:25 crc kubenswrapper[4869]: I0929 14:12:25.945302 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb"] Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.080114 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hxwc\" (UniqueName: \"kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.080762 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.080970 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.182347 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.182452 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.182540 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hxwc\" (UniqueName: \"kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.187191 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.189563 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.197291 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hxwc\" (UniqueName: \"kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-4gdvb\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.258207 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.735922 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb"] Sep 29 14:12:26 crc kubenswrapper[4869]: I0929 14:12:26.841283 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" event={"ID":"ebab3154-a1b6-4616-a738-5e059b3e62b4","Type":"ContainerStarted","Data":"5b362b01c9ec150323ec17b97c6183bfff1560dcfb4c623627a8d5dc5779d420"} Sep 29 14:12:27 crc kubenswrapper[4869]: I0929 14:12:27.850151 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" event={"ID":"ebab3154-a1b6-4616-a738-5e059b3e62b4","Type":"ContainerStarted","Data":"11dd966e749c4b00fe02eba325498b3c68b1bb5f6f3e1482d3a46817296a79a9"} Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.032077 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" podStartSLOduration=4.5771700410000005 podStartE2EDuration="5.032060147s" podCreationTimestamp="2025-09-29 14:12:25 +0000 UTC" firstStartedPulling="2025-09-29 14:12:26.741879772 +0000 UTC m=+1873.182524092" lastFinishedPulling="2025-09-29 14:12:27.196769878 +0000 UTC m=+1873.637414198" observedRunningTime="2025-09-29 14:12:27.873057234 +0000 UTC m=+1874.313701554" watchObservedRunningTime="2025-09-29 14:12:30.032060147 +0000 UTC m=+1876.472704467" Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.034363 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-13a5-account-create-hl72l"] Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.044888 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-150c-account-create-ftbhc"] Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.055769 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-13a5-account-create-hl72l"] Sep 29 14:12:30 crc 
kubenswrapper[4869]: I0929 14:12:30.063856 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-150c-account-create-ftbhc"] Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.253758 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4800f947-0c44-4799-bcd2-6fae29f29ff4" path="/var/lib/kubelet/pods/4800f947-0c44-4799-bcd2-6fae29f29ff4/volumes" Sep 29 14:12:30 crc kubenswrapper[4869]: I0929 14:12:30.254873 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="701be68f-d223-469d-a64c-f813a6254027" path="/var/lib/kubelet/pods/701be68f-d223-469d-a64c-f813a6254027/volumes" Sep 29 14:12:31 crc kubenswrapper[4869]: I0929 14:12:31.025690 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-442e-account-create-5rm42"] Sep 29 14:12:31 crc kubenswrapper[4869]: I0929 14:12:31.042232 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-442e-account-create-5rm42"] Sep 29 14:12:32 crc kubenswrapper[4869]: I0929 14:12:32.257086 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf7ac506-bb7e-4b3b-949c-d12dfae9f756" path="/var/lib/kubelet/pods/bf7ac506-bb7e-4b3b-949c-d12dfae9f756/volumes" Sep 29 14:12:35 crc kubenswrapper[4869]: I0929 14:12:35.933000 4869 generic.go:334] "Generic (PLEG): container finished" podID="ebab3154-a1b6-4616-a738-5e059b3e62b4" containerID="11dd966e749c4b00fe02eba325498b3c68b1bb5f6f3e1482d3a46817296a79a9" exitCode=0 Sep 29 14:12:35 crc kubenswrapper[4869]: I0929 14:12:35.933070 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" event={"ID":"ebab3154-a1b6-4616-a738-5e059b3e62b4","Type":"ContainerDied","Data":"11dd966e749c4b00fe02eba325498b3c68b1bb5f6f3e1482d3a46817296a79a9"} Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.339569 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.517452 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory\") pod \"ebab3154-a1b6-4616-a738-5e059b3e62b4\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.517668 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key\") pod \"ebab3154-a1b6-4616-a738-5e059b3e62b4\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.517712 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hxwc\" (UniqueName: \"kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc\") pod \"ebab3154-a1b6-4616-a738-5e059b3e62b4\" (UID: \"ebab3154-a1b6-4616-a738-5e059b3e62b4\") " Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.523532 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc" (OuterVolumeSpecName: "kube-api-access-7hxwc") pod "ebab3154-a1b6-4616-a738-5e059b3e62b4" (UID: "ebab3154-a1b6-4616-a738-5e059b3e62b4"). InnerVolumeSpecName "kube-api-access-7hxwc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.543975 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory" (OuterVolumeSpecName: "inventory") pod "ebab3154-a1b6-4616-a738-5e059b3e62b4" (UID: "ebab3154-a1b6-4616-a738-5e059b3e62b4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.546594 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ebab3154-a1b6-4616-a738-5e059b3e62b4" (UID: "ebab3154-a1b6-4616-a738-5e059b3e62b4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.620702 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.620738 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebab3154-a1b6-4616-a738-5e059b3e62b4-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.620749 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hxwc\" (UniqueName: \"kubernetes.io/projected/ebab3154-a1b6-4616-a738-5e059b3e62b4-kube-api-access-7hxwc\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.951051 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" event={"ID":"ebab3154-a1b6-4616-a738-5e059b3e62b4","Type":"ContainerDied","Data":"5b362b01c9ec150323ec17b97c6183bfff1560dcfb4c623627a8d5dc5779d420"} Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.951084 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b362b01c9ec150323ec17b97c6183bfff1560dcfb4c623627a8d5dc5779d420" Sep 29 14:12:37 crc kubenswrapper[4869]: I0929 14:12:37.951110 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.021269 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9"] Sep 29 14:12:38 crc kubenswrapper[4869]: E0929 14:12:38.021976 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebab3154-a1b6-4616-a738-5e059b3e62b4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.021999 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebab3154-a1b6-4616-a738-5e059b3e62b4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.022221 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebab3154-a1b6-4616-a738-5e059b3e62b4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.023678 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.025662 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.026782 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.027206 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.027409 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.035405 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9"] Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.128318 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w87n2\" (UniqueName: \"kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.128521 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.128571 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.230688 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.230764 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.230824 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w87n2\" (UniqueName: \"kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: 
\"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.238538 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.238753 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.245929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w87n2\" (UniqueName: \"kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.346532 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.911372 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9"] Sep 29 14:12:38 crc kubenswrapper[4869]: W0929 14:12:38.917844 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a2acbf6_f4da_420a_8325_a49b7550a6fe.slice/crio-f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450 WatchSource:0}: Error finding container f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450: Status 404 returned error can't find the container with id f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450 Sep 29 14:12:38 crc kubenswrapper[4869]: I0929 14:12:38.961652 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" event={"ID":"9a2acbf6-f4da-420a-8325-a49b7550a6fe","Type":"ContainerStarted","Data":"f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450"} Sep 29 14:12:40 crc kubenswrapper[4869]: I0929 14:12:40.980639 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" event={"ID":"9a2acbf6-f4da-420a-8325-a49b7550a6fe","Type":"ContainerStarted","Data":"6519701610b59e8a4e484deda1c386b486a671d975703883fc57dfee9804c604"} Sep 29 14:12:41 crc kubenswrapper[4869]: I0929 14:12:41.000960 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" podStartSLOduration=3.140943398 podStartE2EDuration="4.000946127s" podCreationTimestamp="2025-09-29 14:12:37 +0000 UTC" firstStartedPulling="2025-09-29 14:12:38.920472866 +0000 UTC m=+1885.361117186" lastFinishedPulling="2025-09-29 14:12:39.780475595 +0000 UTC m=+1886.221119915" observedRunningTime="2025-09-29 14:12:41.000279259 +0000 UTC m=+1887.440923599" 
watchObservedRunningTime="2025-09-29 14:12:41.000946127 +0000 UTC m=+1887.441590447" Sep 29 14:12:49 crc kubenswrapper[4869]: I0929 14:12:49.043159 4869 generic.go:334] "Generic (PLEG): container finished" podID="9a2acbf6-f4da-420a-8325-a49b7550a6fe" containerID="6519701610b59e8a4e484deda1c386b486a671d975703883fc57dfee9804c604" exitCode=0 Sep 29 14:12:49 crc kubenswrapper[4869]: I0929 14:12:49.043681 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" event={"ID":"9a2acbf6-f4da-420a-8325-a49b7550a6fe","Type":"ContainerDied","Data":"6519701610b59e8a4e484deda1c386b486a671d975703883fc57dfee9804c604"} Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.464508 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.570576 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w87n2\" (UniqueName: \"kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2\") pod \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.570754 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory\") pod \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.570884 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key\") pod \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\" (UID: \"9a2acbf6-f4da-420a-8325-a49b7550a6fe\") " Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.586974 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2" (OuterVolumeSpecName: "kube-api-access-w87n2") pod "9a2acbf6-f4da-420a-8325-a49b7550a6fe" (UID: "9a2acbf6-f4da-420a-8325-a49b7550a6fe"). InnerVolumeSpecName "kube-api-access-w87n2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.605933 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9a2acbf6-f4da-420a-8325-a49b7550a6fe" (UID: "9a2acbf6-f4da-420a-8325-a49b7550a6fe"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.623592 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory" (OuterVolumeSpecName: "inventory") pod "9a2acbf6-f4da-420a-8325-a49b7550a6fe" (UID: "9a2acbf6-f4da-420a-8325-a49b7550a6fe"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.673572 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w87n2\" (UniqueName: \"kubernetes.io/projected/9a2acbf6-f4da-420a-8325-a49b7550a6fe-kube-api-access-w87n2\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.673985 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:50 crc kubenswrapper[4869]: I0929 14:12:50.673999 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a2acbf6-f4da-420a-8325-a49b7550a6fe-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:12:51 crc kubenswrapper[4869]: I0929 14:12:51.061985 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" event={"ID":"9a2acbf6-f4da-420a-8325-a49b7550a6fe","Type":"ContainerDied","Data":"f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450"} Sep 29 14:12:51 crc kubenswrapper[4869]: I0929 14:12:51.062033 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f990ed76521d029d42100fd22d88db8d12889fc2e2f68af6630c4c3c15daf450" Sep 29 14:12:51 crc kubenswrapper[4869]: I0929 14:12:51.062064 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9" Sep 29 14:12:57 crc kubenswrapper[4869]: I0929 14:12:57.036357 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9fqpp"] Sep 29 14:12:57 crc kubenswrapper[4869]: I0929 14:12:57.044707 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9fqpp"] Sep 29 14:12:58 crc kubenswrapper[4869]: I0929 14:12:58.252539 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74c97303-a69c-471b-8b23-4a72ec813beb" path="/var/lib/kubelet/pods/74c97303-a69c-471b-8b23-4a72ec813beb/volumes" Sep 29 14:13:20 crc kubenswrapper[4869]: I0929 14:13:20.049759 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-6xgt7"] Sep 29 14:13:20 crc kubenswrapper[4869]: I0929 14:13:20.058934 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-6xgt7"] Sep 29 14:13:20 crc kubenswrapper[4869]: I0929 14:13:20.254926 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9d39658-a34b-4541-be56-d2a215fa0c00" path="/var/lib/kubelet/pods/d9d39658-a34b-4541-be56-d2a215fa0c00/volumes" Sep 29 14:13:21 crc kubenswrapper[4869]: I0929 14:13:21.025520 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8vrtz"] Sep 29 14:13:21 crc kubenswrapper[4869]: I0929 14:13:21.037815 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8vrtz"] Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.121255 4869 scope.go:117] "RemoveContainer" containerID="e07c85a0625b72e0247c8ef94f874760bd643b375f9e0ad2b0ec0becb1f875b2" Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.170121 4869 scope.go:117] "RemoveContainer" containerID="5fe133537285bd1796148e21aaaefa911a982f50107d5f7868e4515c3fa6b8e6" Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.196821 4869 
scope.go:117] "RemoveContainer" containerID="7dc4147479864fbc36fde583a4a05c67a66870df6920b06020cef6996e86f127" Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.234306 4869 scope.go:117] "RemoveContainer" containerID="986cf25b2326f69c3ca381493d6e256e0fecbacaeaa1d40aa16fc00927b8b791" Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.254466 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50306168-e24b-46f0-805b-6e703ff45a13" path="/var/lib/kubelet/pods/50306168-e24b-46f0-805b-6e703ff45a13/volumes" Sep 29 14:13:22 crc kubenswrapper[4869]: I0929 14:13:22.290093 4869 scope.go:117] "RemoveContainer" containerID="3557ecc99a01dd817e3db7d621bf4160aa2abc0c90697f30bac6d211d95d1fbb" Sep 29 14:14:05 crc kubenswrapper[4869]: I0929 14:14:05.058675 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-dsc87"] Sep 29 14:14:05 crc kubenswrapper[4869]: I0929 14:14:05.069959 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-dsc87"] Sep 29 14:14:06 crc kubenswrapper[4869]: I0929 14:14:06.258914 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae" path="/var/lib/kubelet/pods/cc0ea1a0-86f5-4a85-8ae9-3241cd6dd8ae/volumes" Sep 29 14:14:22 crc kubenswrapper[4869]: I0929 14:14:22.402337 4869 scope.go:117] "RemoveContainer" containerID="54ad67e4a9f24eaa93d94751756b25ff2040da3c356af741a4674910cdd067ea" Sep 29 14:14:22 crc kubenswrapper[4869]: I0929 14:14:22.472963 4869 scope.go:117] "RemoveContainer" containerID="021a7036073c25eb2fe3e6e820153baa172a60756451645a5308c7f9903f2f19" Sep 29 14:14:50 crc kubenswrapper[4869]: I0929 14:14:50.657285 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:14:50 crc kubenswrapper[4869]: I0929 14:14:50.657859 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.140940 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str"] Sep 29 14:15:00 crc kubenswrapper[4869]: E0929 14:15:00.141908 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2acbf6-f4da-420a-8325-a49b7550a6fe" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.141927 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2acbf6-f4da-420a-8325-a49b7550a6fe" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.142126 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a2acbf6-f4da-420a-8325-a49b7550a6fe" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.142836 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.145542 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.146052 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.151379 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str"] Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.278810 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.279254 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.279567 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk5nl\" (UniqueName: \"kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.381777 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk5nl\" (UniqueName: \"kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.381881 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.381989 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.383024 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume\") pod 
\"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.388152 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.398332 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk5nl\" (UniqueName: \"kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl\") pod \"collect-profiles-29319255-x8str\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.463824 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:00 crc kubenswrapper[4869]: I0929 14:15:00.893638 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str"] Sep 29 14:15:01 crc kubenswrapper[4869]: I0929 14:15:01.246001 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" event={"ID":"7f70b334-c864-4221-b25a-fa532dcd6798","Type":"ContainerStarted","Data":"43797e3ad02dbed3d58c2c0222947b20d291830fca32928d2e996705cb27821d"} Sep 29 14:15:01 crc kubenswrapper[4869]: I0929 14:15:01.246269 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" event={"ID":"7f70b334-c864-4221-b25a-fa532dcd6798","Type":"ContainerStarted","Data":"9dc28fd8cb9d4afef53995871eff892a13bc16a343593157c85f63ad3b25f46f"} Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.256594 4869 generic.go:334] "Generic (PLEG): container finished" podID="7f70b334-c864-4221-b25a-fa532dcd6798" containerID="43797e3ad02dbed3d58c2c0222947b20d291830fca32928d2e996705cb27821d" exitCode=0 Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.256649 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" event={"ID":"7f70b334-c864-4221-b25a-fa532dcd6798","Type":"ContainerDied","Data":"43797e3ad02dbed3d58c2c0222947b20d291830fca32928d2e996705cb27821d"} Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.564271 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.736027 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk5nl\" (UniqueName: \"kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl\") pod \"7f70b334-c864-4221-b25a-fa532dcd6798\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.736136 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume\") pod \"7f70b334-c864-4221-b25a-fa532dcd6798\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.736237 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume\") pod \"7f70b334-c864-4221-b25a-fa532dcd6798\" (UID: \"7f70b334-c864-4221-b25a-fa532dcd6798\") " Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.737823 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume" (OuterVolumeSpecName: "config-volume") pod "7f70b334-c864-4221-b25a-fa532dcd6798" (UID: "7f70b334-c864-4221-b25a-fa532dcd6798"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.743079 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7f70b334-c864-4221-b25a-fa532dcd6798" (UID: "7f70b334-c864-4221-b25a-fa532dcd6798"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.743481 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl" (OuterVolumeSpecName: "kube-api-access-wk5nl") pod "7f70b334-c864-4221-b25a-fa532dcd6798" (UID: "7f70b334-c864-4221-b25a-fa532dcd6798"). InnerVolumeSpecName "kube-api-access-wk5nl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.839230 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk5nl\" (UniqueName: \"kubernetes.io/projected/7f70b334-c864-4221-b25a-fa532dcd6798-kube-api-access-wk5nl\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.839273 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f70b334-c864-4221-b25a-fa532dcd6798-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:02 crc kubenswrapper[4869]: I0929 14:15:02.839283 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f70b334-c864-4221-b25a-fa532dcd6798-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:03 crc kubenswrapper[4869]: I0929 14:15:03.272439 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" event={"ID":"7f70b334-c864-4221-b25a-fa532dcd6798","Type":"ContainerDied","Data":"9dc28fd8cb9d4afef53995871eff892a13bc16a343593157c85f63ad3b25f46f"} Sep 29 14:15:03 crc kubenswrapper[4869]: I0929 14:15:03.272494 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9dc28fd8cb9d4afef53995871eff892a13bc16a343593157c85f63ad3b25f46f" Sep 29 14:15:03 crc kubenswrapper[4869]: I0929 14:15:03.272516 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str" Sep 29 14:15:03 crc kubenswrapper[4869]: I0929 14:15:03.667038 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj"] Sep 29 14:15:03 crc kubenswrapper[4869]: I0929 14:15:03.675456 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319210-6mqqj"] Sep 29 14:15:04 crc kubenswrapper[4869]: I0929 14:15:04.256467 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d831b6b1-9e16-4bd3-88f7-7bed5f73206f" path="/var/lib/kubelet/pods/d831b6b1-9e16-4bd3-88f7-7bed5f73206f/volumes" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.359194 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:11 crc kubenswrapper[4869]: E0929 14:15:11.360228 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f70b334-c864-4221-b25a-fa532dcd6798" containerName="collect-profiles" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.360247 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f70b334-c864-4221-b25a-fa532dcd6798" containerName="collect-profiles" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.360501 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f70b334-c864-4221-b25a-fa532dcd6798" containerName="collect-profiles" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.362286 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.374417 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.517703 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.517804 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.518039 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57m82\" (UniqueName: \"kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.620320 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.620422 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.620464 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57m82\" (UniqueName: \"kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.620871 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.620905 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.639276 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-57m82\" (UniqueName: \"kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82\") pod \"community-operators-tqtbw\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:11 crc kubenswrapper[4869]: I0929 14:15:11.691008 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:12 crc kubenswrapper[4869]: I0929 14:15:12.216971 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:12 crc kubenswrapper[4869]: I0929 14:15:12.378711 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerStarted","Data":"836803feee95f72c4224bf41b028f6f86b0a75136f0b3d8d9211fe81a844c797"} Sep 29 14:15:13 crc kubenswrapper[4869]: I0929 14:15:13.388954 4869 generic.go:334] "Generic (PLEG): container finished" podID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerID="9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf" exitCode=0 Sep 29 14:15:13 crc kubenswrapper[4869]: I0929 14:15:13.388997 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerDied","Data":"9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf"} Sep 29 14:15:13 crc kubenswrapper[4869]: I0929 14:15:13.391051 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.419002 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerStarted","Data":"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361"} Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.574346 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.576264 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.589175 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.733687 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.734119 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.734405 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z62lq\" (UniqueName: \"kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.836687 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.836729 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.836831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z62lq\" (UniqueName: \"kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.837632 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.838150 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.859347 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z62lq\" (UniqueName: \"kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq\") pod \"redhat-operators-kc694\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:16 crc kubenswrapper[4869]: I0929 14:15:16.897985 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:17 crc kubenswrapper[4869]: I0929 14:15:17.222288 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:17 crc kubenswrapper[4869]: I0929 14:15:17.429503 4869 generic.go:334] "Generic (PLEG): container finished" podID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerID="9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361" exitCode=0 Sep 29 14:15:17 crc kubenswrapper[4869]: I0929 14:15:17.429564 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerDied","Data":"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361"} Sep 29 14:15:17 crc kubenswrapper[4869]: I0929 14:15:17.431504 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerStarted","Data":"c4c9aae39f0f6031d13edead95aa1c7e986ba30d8510c1c3dfcb334f8871e7c5"} Sep 29 14:15:18 crc kubenswrapper[4869]: I0929 14:15:18.441288 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerStarted","Data":"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7"} Sep 29 14:15:18 crc kubenswrapper[4869]: I0929 14:15:18.442988 4869 generic.go:334] "Generic (PLEG): container finished" podID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerID="0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508" exitCode=0 Sep 29 14:15:18 crc kubenswrapper[4869]: I0929 14:15:18.443022 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerDied","Data":"0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508"} Sep 29 14:15:18 crc kubenswrapper[4869]: I0929 14:15:18.466650 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tqtbw" podStartSLOduration=3.03905635 podStartE2EDuration="7.466628557s" podCreationTimestamp="2025-09-29 14:15:11 +0000 UTC" firstStartedPulling="2025-09-29 14:15:13.390783685 +0000 UTC m=+2039.831428005" lastFinishedPulling="2025-09-29 14:15:17.818355892 +0000 UTC m=+2044.259000212" observedRunningTime="2025-09-29 14:15:18.46480612 +0000 UTC m=+2044.905450440" watchObservedRunningTime="2025-09-29 14:15:18.466628557 +0000 UTC m=+2044.907272877" Sep 29 14:15:19 crc kubenswrapper[4869]: I0929 14:15:19.452714 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerStarted","Data":"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db"} Sep 29 14:15:20 crc kubenswrapper[4869]: I0929 14:15:20.467702 4869 generic.go:334] "Generic (PLEG): container finished" podID="06e495fa-cfe8-48d9-bb0e-03099eab256e" 
containerID="6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db" exitCode=0 Sep 29 14:15:20 crc kubenswrapper[4869]: I0929 14:15:20.467747 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerDied","Data":"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db"} Sep 29 14:15:20 crc kubenswrapper[4869]: I0929 14:15:20.657545 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:15:20 crc kubenswrapper[4869]: I0929 14:15:20.657635 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:15:21 crc kubenswrapper[4869]: I0929 14:15:21.692528 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:21 crc kubenswrapper[4869]: I0929 14:15:21.693153 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:21 crc kubenswrapper[4869]: I0929 14:15:21.738086 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:22 crc kubenswrapper[4869]: I0929 14:15:22.489314 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerStarted","Data":"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48"} Sep 29 14:15:22 crc kubenswrapper[4869]: I0929 14:15:22.510986 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kc694" podStartSLOduration=4.010434559 podStartE2EDuration="6.510969369s" podCreationTimestamp="2025-09-29 14:15:16 +0000 UTC" firstStartedPulling="2025-09-29 14:15:18.445777491 +0000 UTC m=+2044.886421811" lastFinishedPulling="2025-09-29 14:15:20.946312301 +0000 UTC m=+2047.386956621" observedRunningTime="2025-09-29 14:15:22.50789597 +0000 UTC m=+2048.948540290" watchObservedRunningTime="2025-09-29 14:15:22.510969369 +0000 UTC m=+2048.951613689" Sep 29 14:15:22 crc kubenswrapper[4869]: I0929 14:15:22.554861 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:22 crc kubenswrapper[4869]: I0929 14:15:22.570697 4869 scope.go:117] "RemoveContainer" containerID="9479c97b4434f620057414013f2760510efbf0a2e43770f38169961cf760f17d" Sep 29 14:15:23 crc kubenswrapper[4869]: I0929 14:15:23.346260 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:24 crc kubenswrapper[4869]: I0929 14:15:24.505799 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tqtbw" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="registry-server" 
containerID="cri-o://615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7" gracePeriod=2 Sep 29 14:15:24 crc kubenswrapper[4869]: I0929 14:15:24.975388 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.129790 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities\") pod \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.129898 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57m82\" (UniqueName: \"kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82\") pod \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.130146 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content\") pod \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\" (UID: \"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09\") " Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.130555 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities" (OuterVolumeSpecName: "utilities") pod "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" (UID: "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.141418 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82" (OuterVolumeSpecName: "kube-api-access-57m82") pod "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" (UID: "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09"). InnerVolumeSpecName "kube-api-access-57m82". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.177667 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" (UID: "a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.235911 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.235954 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57m82\" (UniqueName: \"kubernetes.io/projected/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-kube-api-access-57m82\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.235965 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.517417 4869 generic.go:334] "Generic (PLEG): container finished" podID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerID="615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7" exitCode=0 Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.517465 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerDied","Data":"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7"} Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.517496 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqtbw" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.517516 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqtbw" event={"ID":"a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09","Type":"ContainerDied","Data":"836803feee95f72c4224bf41b028f6f86b0a75136f0b3d8d9211fe81a844c797"} Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.517534 4869 scope.go:117] "RemoveContainer" containerID="615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.548174 4869 scope.go:117] "RemoveContainer" containerID="9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.550909 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.561391 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tqtbw"] Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.566715 4869 scope.go:117] "RemoveContainer" containerID="9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.614421 4869 scope.go:117] "RemoveContainer" containerID="615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7" Sep 29 14:15:25 crc kubenswrapper[4869]: E0929 14:15:25.615074 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7\": container with ID starting with 615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7 not found: ID does not exist" containerID="615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.615116 
4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7"} err="failed to get container status \"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7\": rpc error: code = NotFound desc = could not find container \"615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7\": container with ID starting with 615e889f141ab687f06280caa69a028c7cae60bc1a2ae2452f400cd73c2569f7 not found: ID does not exist" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.615143 4869 scope.go:117] "RemoveContainer" containerID="9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361" Sep 29 14:15:25 crc kubenswrapper[4869]: E0929 14:15:25.615711 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361\": container with ID starting with 9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361 not found: ID does not exist" containerID="9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.615751 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361"} err="failed to get container status \"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361\": rpc error: code = NotFound desc = could not find container \"9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361\": container with ID starting with 9a3edab800339fc011b21354eee0b4f4a90c04a879b04118eab678f047fb9361 not found: ID does not exist" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.615780 4869 scope.go:117] "RemoveContainer" containerID="9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf" Sep 29 14:15:25 crc kubenswrapper[4869]: E0929 14:15:25.616799 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf\": container with ID starting with 9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf not found: ID does not exist" containerID="9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf" Sep 29 14:15:25 crc kubenswrapper[4869]: I0929 14:15:25.616832 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf"} err="failed to get container status \"9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf\": rpc error: code = NotFound desc = could not find container \"9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf\": container with ID starting with 9c84729553d5ff7179b536e53dcaf150dfd23da1fa3254ad86104592afdddacf not found: ID does not exist" Sep 29 14:15:26 crc kubenswrapper[4869]: I0929 14:15:26.253236 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" path="/var/lib/kubelet/pods/a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09/volumes" Sep 29 14:15:26 crc kubenswrapper[4869]: I0929 14:15:26.898420 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:26 crc kubenswrapper[4869]: I0929 14:15:26.898482 4869 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:26 crc kubenswrapper[4869]: I0929 14:15:26.973410 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:27 crc kubenswrapper[4869]: I0929 14:15:27.581079 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:28 crc kubenswrapper[4869]: I0929 14:15:28.350560 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:29 crc kubenswrapper[4869]: I0929 14:15:29.550940 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kc694" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="registry-server" containerID="cri-o://ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48" gracePeriod=2 Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.018974 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.127040 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z62lq\" (UniqueName: \"kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq\") pod \"06e495fa-cfe8-48d9-bb0e-03099eab256e\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.127135 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities\") pod \"06e495fa-cfe8-48d9-bb0e-03099eab256e\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.127308 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content\") pod \"06e495fa-cfe8-48d9-bb0e-03099eab256e\" (UID: \"06e495fa-cfe8-48d9-bb0e-03099eab256e\") " Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.128376 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities" (OuterVolumeSpecName: "utilities") pod "06e495fa-cfe8-48d9-bb0e-03099eab256e" (UID: "06e495fa-cfe8-48d9-bb0e-03099eab256e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.128609 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.134379 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq" (OuterVolumeSpecName: "kube-api-access-z62lq") pod "06e495fa-cfe8-48d9-bb0e-03099eab256e" (UID: "06e495fa-cfe8-48d9-bb0e-03099eab256e"). InnerVolumeSpecName "kube-api-access-z62lq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.208832 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06e495fa-cfe8-48d9-bb0e-03099eab256e" (UID: "06e495fa-cfe8-48d9-bb0e-03099eab256e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.230803 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e495fa-cfe8-48d9-bb0e-03099eab256e-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.230837 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z62lq\" (UniqueName: \"kubernetes.io/projected/06e495fa-cfe8-48d9-bb0e-03099eab256e-kube-api-access-z62lq\") on node \"crc\" DevicePath \"\"" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.564548 4869 generic.go:334] "Generic (PLEG): container finished" podID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerID="ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48" exitCode=0 Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.564601 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerDied","Data":"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48"} Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.564670 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc694" event={"ID":"06e495fa-cfe8-48d9-bb0e-03099eab256e","Type":"ContainerDied","Data":"c4c9aae39f0f6031d13edead95aa1c7e986ba30d8510c1c3dfcb334f8871e7c5"} Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.564690 4869 scope.go:117] "RemoveContainer" containerID="ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.564705 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kc694" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.594241 4869 scope.go:117] "RemoveContainer" containerID="6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.594805 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.603814 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kc694"] Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.617339 4869 scope.go:117] "RemoveContainer" containerID="0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.659068 4869 scope.go:117] "RemoveContainer" containerID="ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48" Sep 29 14:15:30 crc kubenswrapper[4869]: E0929 14:15:30.659991 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48\": container with ID starting with ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48 not found: ID does not exist" containerID="ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.660030 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48"} err="failed to get container status \"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48\": rpc error: code = NotFound desc = could not find container \"ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48\": container with ID starting with ce7aaa6e411bb5f0f0a0ddde80fd99dffb000af2bf8c27ac537e9bd5c82f7e48 not found: ID does not exist" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.660055 4869 scope.go:117] "RemoveContainer" containerID="6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db" Sep 29 14:15:30 crc kubenswrapper[4869]: E0929 14:15:30.660382 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db\": container with ID starting with 6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db not found: ID does not exist" containerID="6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.660479 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db"} err="failed to get container status \"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db\": rpc error: code = NotFound desc = could not find container \"6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db\": container with ID starting with 6317f931fae88dec80362f2d00d41f8160084ffdb00f87a23d9f5ddf211ad1db not found: ID does not exist" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.660584 4869 scope.go:117] "RemoveContainer" containerID="0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508" Sep 29 14:15:30 crc kubenswrapper[4869]: E0929 14:15:30.660893 4869 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508\": container with ID starting with 0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508 not found: ID does not exist" containerID="0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508" Sep 29 14:15:30 crc kubenswrapper[4869]: I0929 14:15:30.660931 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508"} err="failed to get container status \"0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508\": rpc error: code = NotFound desc = could not find container \"0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508\": container with ID starting with 0d0700bb08834095fef1cb423f6fd86243102d51ce83f0522a7be70c21d40508 not found: ID does not exist" Sep 29 14:15:32 crc kubenswrapper[4869]: I0929 14:15:32.255807 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" path="/var/lib/kubelet/pods/06e495fa-cfe8-48d9-bb0e-03099eab256e/volumes" Sep 29 14:15:50 crc kubenswrapper[4869]: I0929 14:15:50.657213 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:15:50 crc kubenswrapper[4869]: I0929 14:15:50.658154 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:15:50 crc kubenswrapper[4869]: I0929 14:15:50.658238 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:15:50 crc kubenswrapper[4869]: I0929 14:15:50.659564 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:15:50 crc kubenswrapper[4869]: I0929 14:15:50.659666 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f" gracePeriod=600 Sep 29 14:15:51 crc kubenswrapper[4869]: I0929 14:15:51.749481 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f" exitCode=0 Sep 29 14:15:51 crc kubenswrapper[4869]: I0929 14:15:51.749557 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f"} 
Sep 29 14:15:51 crc kubenswrapper[4869]: I0929 14:15:51.750643 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f"} Sep 29 14:15:51 crc kubenswrapper[4869]: I0929 14:15:51.750679 4869 scope.go:117] "RemoveContainer" containerID="dc4dc8b59f063ddd11b31dc0cec035879c5af1f80a507a8a8282428df08e4bac" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.593213 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595073 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595092 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595161 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595172 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595185 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="extract-utilities" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595192 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="extract-utilities" Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595208 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="extract-content" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595242 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="extract-content" Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595271 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="extract-utilities" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595279 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="extract-utilities" Sep 29 14:16:58 crc kubenswrapper[4869]: E0929 14:16:58.595327 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="extract-content" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595337 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="extract-content" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595672 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="a92fb2ee-6fb3-4d6b-a007-c8ba1c16cc09" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.595713 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e495fa-cfe8-48d9-bb0e-03099eab256e" containerName="registry-server" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.600909 4869 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.609827 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.669706 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.669820 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.669929 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxr4j\" (UniqueName: \"kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.771882 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.772002 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.772119 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxr4j\" (UniqueName: \"kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.772772 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.772897 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.795570 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxr4j\" (UniqueName: \"kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j\") pod \"redhat-marketplace-nlfmn\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:58 crc kubenswrapper[4869]: I0929 14:16:58.950148 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:16:59 crc kubenswrapper[4869]: I0929 14:16:59.429224 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:17:00 crc kubenswrapper[4869]: I0929 14:17:00.378003 4869 generic.go:334] "Generic (PLEG): container finished" podID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerID="1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29" exitCode=0 Sep 29 14:17:00 crc kubenswrapper[4869]: I0929 14:17:00.378081 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerDied","Data":"1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29"} Sep 29 14:17:00 crc kubenswrapper[4869]: I0929 14:17:00.378273 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerStarted","Data":"30c5062dfe9f431d43045ac527de9a9e57bfb5ea6e715347784c626095c902f0"} Sep 29 14:17:02 crc kubenswrapper[4869]: I0929 14:17:02.399023 4869 generic.go:334] "Generic (PLEG): container finished" podID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerID="14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0" exitCode=0 Sep 29 14:17:02 crc kubenswrapper[4869]: I0929 14:17:02.399190 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerDied","Data":"14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0"} Sep 29 14:17:04 crc kubenswrapper[4869]: I0929 14:17:04.424765 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerStarted","Data":"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff"} Sep 29 14:17:04 crc kubenswrapper[4869]: I0929 14:17:04.442735 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nlfmn" podStartSLOduration=3.741262962 podStartE2EDuration="6.442708132s" podCreationTimestamp="2025-09-29 14:16:58 +0000 UTC" firstStartedPulling="2025-09-29 14:17:00.379859656 +0000 UTC m=+2146.820503976" lastFinishedPulling="2025-09-29 14:17:03.081304826 +0000 UTC m=+2149.521949146" observedRunningTime="2025-09-29 14:17:04.441326626 +0000 UTC m=+2150.881970946" watchObservedRunningTime="2025-09-29 14:17:04.442708132 +0000 UTC m=+2150.883352452" Sep 29 14:17:08 crc kubenswrapper[4869]: I0929 14:17:08.950846 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:08 crc kubenswrapper[4869]: I0929 14:17:08.951301 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:09 crc kubenswrapper[4869]: I0929 
14:17:09.003344 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:09 crc kubenswrapper[4869]: I0929 14:17:09.526897 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:09 crc kubenswrapper[4869]: I0929 14:17:09.582630 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:17:11 crc kubenswrapper[4869]: I0929 14:17:11.502357 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nlfmn" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="registry-server" containerID="cri-o://5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff" gracePeriod=2 Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.021311 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.042545 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxr4j\" (UniqueName: \"kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j\") pod \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.042694 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content\") pod \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.045950 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities\") pod \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\" (UID: \"4161f125-bfac-46ed-9eed-ac27f0cc1f59\") " Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.047680 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities" (OuterVolumeSpecName: "utilities") pod "4161f125-bfac-46ed-9eed-ac27f0cc1f59" (UID: "4161f125-bfac-46ed-9eed-ac27f0cc1f59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.053952 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j" (OuterVolumeSpecName: "kube-api-access-sxr4j") pod "4161f125-bfac-46ed-9eed-ac27f0cc1f59" (UID: "4161f125-bfac-46ed-9eed-ac27f0cc1f59"). InnerVolumeSpecName "kube-api-access-sxr4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.060539 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4161f125-bfac-46ed-9eed-ac27f0cc1f59" (UID: "4161f125-bfac-46ed-9eed-ac27f0cc1f59"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.149261 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxr4j\" (UniqueName: \"kubernetes.io/projected/4161f125-bfac-46ed-9eed-ac27f0cc1f59-kube-api-access-sxr4j\") on node \"crc\" DevicePath \"\"" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.149329 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.149343 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4161f125-bfac-46ed-9eed-ac27f0cc1f59-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.516801 4869 generic.go:334] "Generic (PLEG): container finished" podID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerID="5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff" exitCode=0 Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.516867 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerDied","Data":"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff"} Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.516906 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nlfmn" event={"ID":"4161f125-bfac-46ed-9eed-ac27f0cc1f59","Type":"ContainerDied","Data":"30c5062dfe9f431d43045ac527de9a9e57bfb5ea6e715347784c626095c902f0"} Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.516920 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nlfmn" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.516929 4869 scope.go:117] "RemoveContainer" containerID="5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.544481 4869 scope.go:117] "RemoveContainer" containerID="14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.550083 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.559894 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nlfmn"] Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.570744 4869 scope.go:117] "RemoveContainer" containerID="1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.618994 4869 scope.go:117] "RemoveContainer" containerID="5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff" Sep 29 14:17:12 crc kubenswrapper[4869]: E0929 14:17:12.619777 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff\": container with ID starting with 5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff not found: ID does not exist" containerID="5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.619870 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff"} err="failed to get container status \"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff\": rpc error: code = NotFound desc = could not find container \"5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff\": container with ID starting with 5ad54a30ba87c516fc162a9dbfe8a4eeab6f2ae8884c1216b86f56cfca5286ff not found: ID does not exist" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.619927 4869 scope.go:117] "RemoveContainer" containerID="14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0" Sep 29 14:17:12 crc kubenswrapper[4869]: E0929 14:17:12.620484 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0\": container with ID starting with 14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0 not found: ID does not exist" containerID="14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.620526 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0"} err="failed to get container status \"14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0\": rpc error: code = NotFound desc = could not find container \"14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0\": container with ID starting with 14603089f71d2de1da3778fa189b0a0324d72fcfcbfcacb32fd87b06230b33c0 not found: ID does not exist" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.620555 4869 scope.go:117] "RemoveContainer" 
containerID="1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29" Sep 29 14:17:12 crc kubenswrapper[4869]: E0929 14:17:12.620898 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29\": container with ID starting with 1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29 not found: ID does not exist" containerID="1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29" Sep 29 14:17:12 crc kubenswrapper[4869]: I0929 14:17:12.620935 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29"} err="failed to get container status \"1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29\": rpc error: code = NotFound desc = could not find container \"1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29\": container with ID starting with 1003d40c1b095bd6f1e12cc39c28428d7467426b29e1eec8356e0408a748fb29 not found: ID does not exist" Sep 29 14:17:14 crc kubenswrapper[4869]: I0929 14:17:14.253913 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" path="/var/lib/kubelet/pods/4161f125-bfac-46ed-9eed-ac27f0cc1f59/volumes" Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.427228 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.444546 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.451876 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.458581 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.464697 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mq2lb"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.471667 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.476861 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.483365 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-wc5xd"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.490519 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.496795 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.502450 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fbm8h"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.509273 4869 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-bpq6s"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.516178 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-4gdvb"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.524197 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-tn5l9"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.531551 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8tpx7"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.543013 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c4cbz"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.550012 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.556745 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-sghc5"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.563175 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-584k2"] Sep 29 14:18:04 crc kubenswrapper[4869]: I0929 14:18:04.570229 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-sghc5"] Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.252557 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19478488-9f59-4a64-b3f9-184f6a259d06" path="/var/lib/kubelet/pods/19478488-9f59-4a64-b3f9-184f6a259d06/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.253396 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e18b32b-bc94-48bf-9042-6f3b03a56811" path="/var/lib/kubelet/pods/4e18b32b-bc94-48bf-9042-6f3b03a56811/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.253910 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aa4be87-45c9-4708-823b-13d5786c6046" path="/var/lib/kubelet/pods/6aa4be87-45c9-4708-823b-13d5786c6046/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.254476 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9449b5c9-7a8b-44ab-86d4-17b6c4dca520" path="/var/lib/kubelet/pods/9449b5c9-7a8b-44ab-86d4-17b6c4dca520/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.255451 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a2acbf6-f4da-420a-8325-a49b7550a6fe" path="/var/lib/kubelet/pods/9a2acbf6-f4da-420a-8325-a49b7550a6fe/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.255986 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb4e617b-b904-4bd2-b84b-daf3d4265da1" path="/var/lib/kubelet/pods/cb4e617b-b904-4bd2-b84b-daf3d4265da1/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.256461 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9" path="/var/lib/kubelet/pods/e5dfbedc-2fc3-483b-bb1a-b4931f1f9db9/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.257354 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebab3154-a1b6-4616-a738-5e059b3e62b4" 
path="/var/lib/kubelet/pods/ebab3154-a1b6-4616-a738-5e059b3e62b4/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.257900 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f999ef7e-3672-46d6-b432-10b37fa93176" path="/var/lib/kubelet/pods/f999ef7e-3672-46d6-b432-10b37fa93176/volumes" Sep 29 14:18:06 crc kubenswrapper[4869]: I0929 14:18:06.258434 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8" path="/var/lib/kubelet/pods/fdb5e4cf-aceb-4bc3-91b5-e7909f699dc8/volumes" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.640963 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4"] Sep 29 14:18:16 crc kubenswrapper[4869]: E0929 14:18:16.642287 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="extract-utilities" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.642306 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="extract-utilities" Sep 29 14:18:16 crc kubenswrapper[4869]: E0929 14:18:16.642326 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="extract-content" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.642334 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="extract-content" Sep 29 14:18:16 crc kubenswrapper[4869]: E0929 14:18:16.642373 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="registry-server" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.642381 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="registry-server" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.642650 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4161f125-bfac-46ed-9eed-ac27f0cc1f59" containerName="registry-server" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.643728 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.646773 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.647099 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.647442 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.647925 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.648746 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.657473 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4"] Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.820270 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t269t\" (UniqueName: \"kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.820369 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.820421 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.820480 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.820548 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.922304 4869 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-t269t\" (UniqueName: \"kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.922391 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.922428 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.922490 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.922549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.929006 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.929146 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.930090 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.931047 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.940253 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t269t\" (UniqueName: \"kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:16 crc kubenswrapper[4869]: I0929 14:18:16.979200 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:17 crc kubenswrapper[4869]: I0929 14:18:17.516080 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4"] Sep 29 14:18:18 crc kubenswrapper[4869]: I0929 14:18:18.063847 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" event={"ID":"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f","Type":"ContainerStarted","Data":"f16e33c66e13afcb150aebd8d6d9dbbb0207ca7e41e943b46887a1b141cdae9d"} Sep 29 14:18:19 crc kubenswrapper[4869]: I0929 14:18:19.073253 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" event={"ID":"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f","Type":"ContainerStarted","Data":"625934432d44d4baf53b5f6b04fe808caf3c58a39fffca6d32b0623d58c2b58f"} Sep 29 14:18:19 crc kubenswrapper[4869]: I0929 14:18:19.098690 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" podStartSLOduration=2.625686136 podStartE2EDuration="3.098669107s" podCreationTimestamp="2025-09-29 14:18:16 +0000 UTC" firstStartedPulling="2025-09-29 14:18:17.521951075 +0000 UTC m=+2223.962595395" lastFinishedPulling="2025-09-29 14:18:17.994934046 +0000 UTC m=+2224.435578366" observedRunningTime="2025-09-29 14:18:19.091363467 +0000 UTC m=+2225.532007797" watchObservedRunningTime="2025-09-29 14:18:19.098669107 +0000 UTC m=+2225.539313447" Sep 29 14:18:20 crc kubenswrapper[4869]: I0929 14:18:20.657320 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:18:20 crc kubenswrapper[4869]: I0929 14:18:20.657534 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:18:22 crc kubenswrapper[4869]: I0929 14:18:22.718625 4869 scope.go:117] "RemoveContainer" containerID="c13d29d7e28cf7d5c049adc48754f3e5ec76c5a2d71f022e04ccf29a61bd2043" Sep 29 14:18:22 crc kubenswrapper[4869]: I0929 14:18:22.774449 4869 scope.go:117] "RemoveContainer" containerID="2d2e8ee1ff2029fa6c26d37397b3f4a17ec1cd344f992cba9612d7371cc25e0d" Sep 29 14:18:22 crc 
kubenswrapper[4869]: I0929 14:18:22.808559 4869 scope.go:117] "RemoveContainer" containerID="39922a592ef8be57b48593cb5e9ad5659c195ac5ef1f90fd43c15c50ce5fc5f6" Sep 29 14:18:22 crc kubenswrapper[4869]: I0929 14:18:22.851215 4869 scope.go:117] "RemoveContainer" containerID="783fd8a8b004bf601aa3ffc731620391a0b288e90ba385a622bc469bcc4f4f91" Sep 29 14:18:22 crc kubenswrapper[4869]: I0929 14:18:22.895643 4869 scope.go:117] "RemoveContainer" containerID="7174425c78c9158e1b2794ea7445f3d3c0105b823ebba099135cf89c13d5101d" Sep 29 14:18:22 crc kubenswrapper[4869]: I0929 14:18:22.954588 4869 scope.go:117] "RemoveContainer" containerID="36b7d9d5a9700938d1563870424ec500d98cf311151a5d884b3e2c521ce828c4" Sep 29 14:18:23 crc kubenswrapper[4869]: I0929 14:18:23.025590 4869 scope.go:117] "RemoveContainer" containerID="e5c1c31eeb7183ccc2a3e1b302c454abdf07b06cd1de5105eaefab3f0faa175f" Sep 29 14:18:23 crc kubenswrapper[4869]: I0929 14:18:23.070131 4869 scope.go:117] "RemoveContainer" containerID="abea5017b835abc0a0ebcfcb98d8f1e648bbad419cac12a52c26bb1987b709fe" Sep 29 14:18:29 crc kubenswrapper[4869]: I0929 14:18:29.206213 4869 generic.go:334] "Generic (PLEG): container finished" podID="aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" containerID="625934432d44d4baf53b5f6b04fe808caf3c58a39fffca6d32b0623d58c2b58f" exitCode=0 Sep 29 14:18:29 crc kubenswrapper[4869]: I0929 14:18:29.206322 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" event={"ID":"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f","Type":"ContainerDied","Data":"625934432d44d4baf53b5f6b04fe808caf3c58a39fffca6d32b0623d58c2b58f"} Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.672899 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.709137 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph\") pod \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.709465 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t269t\" (UniqueName: \"kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t\") pod \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.709634 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key\") pod \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.709722 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle\") pod \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.709836 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory\") pod 
\"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\" (UID: \"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f\") " Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.716541 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t" (OuterVolumeSpecName: "kube-api-access-t269t") pod "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" (UID: "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f"). InnerVolumeSpecName "kube-api-access-t269t". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.718710 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph" (OuterVolumeSpecName: "ceph") pod "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" (UID: "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.722283 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" (UID: "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.743673 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" (UID: "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.744275 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory" (OuterVolumeSpecName: "inventory") pod "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" (UID: "aaba9b54-27f6-4b48-a3fb-63ed25d6b93f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.812348 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.812391 4869 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.812404 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.812413 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:18:30 crc kubenswrapper[4869]: I0929 14:18:30.812423 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t269t\" (UniqueName: \"kubernetes.io/projected/aaba9b54-27f6-4b48-a3fb-63ed25d6b93f-kube-api-access-t269t\") on node \"crc\" DevicePath \"\"" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.227752 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" event={"ID":"aaba9b54-27f6-4b48-a3fb-63ed25d6b93f","Type":"ContainerDied","Data":"f16e33c66e13afcb150aebd8d6d9dbbb0207ca7e41e943b46887a1b141cdae9d"} Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.227810 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f16e33c66e13afcb150aebd8d6d9dbbb0207ca7e41e943b46887a1b141cdae9d" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.228549 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.319724 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67"] Sep 29 14:18:31 crc kubenswrapper[4869]: E0929 14:18:31.320221 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.320241 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.320477 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaba9b54-27f6-4b48-a3fb-63ed25d6b93f" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.321497 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.323760 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.327586 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.328866 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.329178 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.330099 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.344125 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67"] Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.424069 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.424165 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkhz9\" (UniqueName: \"kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.424207 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.424245 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.424265 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.526580 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-zkhz9\" (UniqueName: \"kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.526679 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.526722 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.526745 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.526820 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.533299 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.533561 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.533595 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.541253 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.543171 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkhz9\" (UniqueName: \"kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:31 crc kubenswrapper[4869]: I0929 14:18:31.641716 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:18:32 crc kubenswrapper[4869]: I0929 14:18:32.158020 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67"] Sep 29 14:18:32 crc kubenswrapper[4869]: W0929 14:18:32.166106 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcfae165c_93c0_48bb_8106_ec1f4f85ce17.slice/crio-4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec WatchSource:0}: Error finding container 4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec: Status 404 returned error can't find the container with id 4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec Sep 29 14:18:32 crc kubenswrapper[4869]: I0929 14:18:32.236261 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" event={"ID":"cfae165c-93c0-48bb-8106-ec1f4f85ce17","Type":"ContainerStarted","Data":"4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec"} Sep 29 14:18:33 crc kubenswrapper[4869]: I0929 14:18:33.249080 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" event={"ID":"cfae165c-93c0-48bb-8106-ec1f4f85ce17","Type":"ContainerStarted","Data":"bdd5932c46fff2185ee87680fbc5cc5bc5015672a3fc6cc4ba13349fbc668d5c"} Sep 29 14:18:33 crc kubenswrapper[4869]: I0929 14:18:33.269507 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" podStartSLOduration=1.715319391 podStartE2EDuration="2.269489962s" podCreationTimestamp="2025-09-29 14:18:31 +0000 UTC" firstStartedPulling="2025-09-29 14:18:32.168867192 +0000 UTC m=+2238.609511512" lastFinishedPulling="2025-09-29 14:18:32.723037773 +0000 UTC m=+2239.163682083" observedRunningTime="2025-09-29 14:18:33.264230755 +0000 UTC m=+2239.704875075" watchObservedRunningTime="2025-09-29 14:18:33.269489962 +0000 UTC m=+2239.710134272" Sep 29 14:18:50 crc kubenswrapper[4869]: I0929 14:18:50.656649 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:18:50 crc kubenswrapper[4869]: I0929 14:18:50.657186 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:19:20 crc kubenswrapper[4869]: I0929 14:19:20.656502 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:19:20 crc kubenswrapper[4869]: I0929 14:19:20.657247 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:19:20 crc kubenswrapper[4869]: I0929 14:19:20.657285 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:19:20 crc kubenswrapper[4869]: I0929 14:19:20.660690 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:19:20 crc kubenswrapper[4869]: I0929 14:19:20.661003 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" gracePeriod=600 Sep 29 14:19:20 crc kubenswrapper[4869]: E0929 14:19:20.801280 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:19:21 crc kubenswrapper[4869]: I0929 14:19:21.675297 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" exitCode=0 Sep 29 14:19:21 crc kubenswrapper[4869]: I0929 14:19:21.675356 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f"} Sep 29 14:19:21 crc kubenswrapper[4869]: I0929 14:19:21.675397 4869 scope.go:117] "RemoveContainer" containerID="3ff83f4fdc5296b479a17be426540e9999710a227e77cb8f76f5fb8b3f7cdc5f" Sep 29 14:19:21 crc kubenswrapper[4869]: I0929 14:19:21.676751 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:19:21 crc kubenswrapper[4869]: E0929 14:19:21.677254 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:19:23 crc kubenswrapper[4869]: I0929 14:19:23.258896 4869 scope.go:117] "RemoveContainer" containerID="11dd966e749c4b00fe02eba325498b3c68b1bb5f6f3e1482d3a46817296a79a9" Sep 29 14:19:23 crc kubenswrapper[4869]: I0929 14:19:23.300276 4869 scope.go:117] "RemoveContainer" containerID="6519701610b59e8a4e484deda1c386b486a671d975703883fc57dfee9804c604" Sep 29 14:19:36 crc kubenswrapper[4869]: I0929 14:19:36.242836 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:19:36 crc kubenswrapper[4869]: E0929 14:19:36.243657 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:19:47 crc kubenswrapper[4869]: I0929 14:19:47.241693 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:19:47 crc kubenswrapper[4869]: E0929 14:19:47.242346 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:01 crc kubenswrapper[4869]: I0929 14:20:01.242631 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:20:01 crc kubenswrapper[4869]: E0929 14:20:01.243910 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:06 crc kubenswrapper[4869]: I0929 14:20:06.054422 4869 generic.go:334] "Generic (PLEG): container finished" podID="cfae165c-93c0-48bb-8106-ec1f4f85ce17" containerID="bdd5932c46fff2185ee87680fbc5cc5bc5015672a3fc6cc4ba13349fbc668d5c" exitCode=0 Sep 29 14:20:06 crc kubenswrapper[4869]: I0929 14:20:06.054631 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" event={"ID":"cfae165c-93c0-48bb-8106-ec1f4f85ce17","Type":"ContainerDied","Data":"bdd5932c46fff2185ee87680fbc5cc5bc5015672a3fc6cc4ba13349fbc668d5c"} Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.481968 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.647670 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle\") pod \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.647873 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key\") pod \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.647912 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory\") pod \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.647996 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph\") pod \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.648081 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkhz9\" (UniqueName: \"kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9\") pod \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\" (UID: \"cfae165c-93c0-48bb-8106-ec1f4f85ce17\") " Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.653992 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cfae165c-93c0-48bb-8106-ec1f4f85ce17" (UID: "cfae165c-93c0-48bb-8106-ec1f4f85ce17"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.654818 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph" (OuterVolumeSpecName: "ceph") pod "cfae165c-93c0-48bb-8106-ec1f4f85ce17" (UID: "cfae165c-93c0-48bb-8106-ec1f4f85ce17"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.655972 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9" (OuterVolumeSpecName: "kube-api-access-zkhz9") pod "cfae165c-93c0-48bb-8106-ec1f4f85ce17" (UID: "cfae165c-93c0-48bb-8106-ec1f4f85ce17"). InnerVolumeSpecName "kube-api-access-zkhz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.674796 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cfae165c-93c0-48bb-8106-ec1f4f85ce17" (UID: "cfae165c-93c0-48bb-8106-ec1f4f85ce17"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.681146 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory" (OuterVolumeSpecName: "inventory") pod "cfae165c-93c0-48bb-8106-ec1f4f85ce17" (UID: "cfae165c-93c0-48bb-8106-ec1f4f85ce17"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.750905 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.750946 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.750955 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.750964 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkhz9\" (UniqueName: \"kubernetes.io/projected/cfae165c-93c0-48bb-8106-ec1f4f85ce17-kube-api-access-zkhz9\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:07 crc kubenswrapper[4869]: I0929 14:20:07.750977 4869 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfae165c-93c0-48bb-8106-ec1f4f85ce17-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.075374 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" event={"ID":"cfae165c-93c0-48bb-8106-ec1f4f85ce17","Type":"ContainerDied","Data":"4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec"} Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.075440 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4eba4b2810210f5820fc2d9b2be3146ce5bfd3b8c62d2945ea42ba45770937ec" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.075437 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.170900 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll"] Sep 29 14:20:08 crc kubenswrapper[4869]: E0929 14:20:08.171308 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfae165c-93c0-48bb-8106-ec1f4f85ce17" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.171323 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfae165c-93c0-48bb-8106-ec1f4f85ce17" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.171536 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfae165c-93c0-48bb-8106-ec1f4f85ce17" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.172228 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.174952 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.174985 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.176166 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.176474 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.176593 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.191726 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll"] Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.258781 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.258913 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.258977 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.259059 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmvkf\" (UniqueName: \"kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.361439 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmvkf\" (UniqueName: \"kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.361955 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.362009 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.362083 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.365958 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.365986 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.368311 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.380077 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmvkf\" (UniqueName: \"kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-plkll\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:08 crc kubenswrapper[4869]: I0929 14:20:08.495026 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:09 crc kubenswrapper[4869]: I0929 14:20:09.048126 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll"] Sep 29 14:20:09 crc kubenswrapper[4869]: I0929 14:20:09.084091 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" event={"ID":"b4aef40c-231d-4e13-a0b7-c8e65c69ce91","Type":"ContainerStarted","Data":"59ce00440f5a2eee6e5f948bf8d4726dad9d70469a97415ac1ca8da4db0c5e05"} Sep 29 14:20:11 crc kubenswrapper[4869]: I0929 14:20:11.100687 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" event={"ID":"b4aef40c-231d-4e13-a0b7-c8e65c69ce91","Type":"ContainerStarted","Data":"33bf575d1c065a7263433cb41b49ea3d556dec93dcbce3bcfd8dfc78ac9ba2b2"} Sep 29 14:20:11 crc kubenswrapper[4869]: I0929 14:20:11.120205 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" podStartSLOduration=2.102613506 podStartE2EDuration="3.120185669s" podCreationTimestamp="2025-09-29 14:20:08 +0000 UTC" firstStartedPulling="2025-09-29 14:20:09.04294895 +0000 UTC m=+2335.483593270" lastFinishedPulling="2025-09-29 14:20:10.060521113 +0000 UTC m=+2336.501165433" observedRunningTime="2025-09-29 14:20:11.114345188 +0000 UTC m=+2337.554989508" watchObservedRunningTime="2025-09-29 14:20:11.120185669 +0000 UTC m=+2337.560829989" Sep 29 14:20:14 crc kubenswrapper[4869]: I0929 14:20:14.247529 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:20:14 crc kubenswrapper[4869]: E0929 14:20:14.248313 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:25 crc kubenswrapper[4869]: I0929 14:20:25.241771 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:20:25 crc kubenswrapper[4869]: E0929 14:20:25.242563 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:35 crc kubenswrapper[4869]: I0929 14:20:35.306036 4869 generic.go:334] "Generic (PLEG): container finished" podID="b4aef40c-231d-4e13-a0b7-c8e65c69ce91" containerID="33bf575d1c065a7263433cb41b49ea3d556dec93dcbce3bcfd8dfc78ac9ba2b2" exitCode=0 Sep 29 14:20:35 crc kubenswrapper[4869]: I0929 14:20:35.306131 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" event={"ID":"b4aef40c-231d-4e13-a0b7-c8e65c69ce91","Type":"ContainerDied","Data":"33bf575d1c065a7263433cb41b49ea3d556dec93dcbce3bcfd8dfc78ac9ba2b2"} Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.742875 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.918215 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmvkf\" (UniqueName: \"kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf\") pod \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.918314 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key\") pod \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.918356 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory\") pod \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.918520 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph\") pod \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\" (UID: \"b4aef40c-231d-4e13-a0b7-c8e65c69ce91\") " Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.925580 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph" (OuterVolumeSpecName: "ceph") pod "b4aef40c-231d-4e13-a0b7-c8e65c69ce91" (UID: "b4aef40c-231d-4e13-a0b7-c8e65c69ce91"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.925808 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf" (OuterVolumeSpecName: "kube-api-access-lmvkf") pod "b4aef40c-231d-4e13-a0b7-c8e65c69ce91" (UID: "b4aef40c-231d-4e13-a0b7-c8e65c69ce91"). InnerVolumeSpecName "kube-api-access-lmvkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.946174 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory" (OuterVolumeSpecName: "inventory") pod "b4aef40c-231d-4e13-a0b7-c8e65c69ce91" (UID: "b4aef40c-231d-4e13-a0b7-c8e65c69ce91"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:36 crc kubenswrapper[4869]: I0929 14:20:36.946842 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b4aef40c-231d-4e13-a0b7-c8e65c69ce91" (UID: "b4aef40c-231d-4e13-a0b7-c8e65c69ce91"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.020750 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.020802 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmvkf\" (UniqueName: \"kubernetes.io/projected/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-kube-api-access-lmvkf\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.020814 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.020824 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4aef40c-231d-4e13-a0b7-c8e65c69ce91-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.241998 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:20:37 crc kubenswrapper[4869]: E0929 14:20:37.242285 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.324145 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" event={"ID":"b4aef40c-231d-4e13-a0b7-c8e65c69ce91","Type":"ContainerDied","Data":"59ce00440f5a2eee6e5f948bf8d4726dad9d70469a97415ac1ca8da4db0c5e05"} Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.324196 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-plkll" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.324205 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59ce00440f5a2eee6e5f948bf8d4726dad9d70469a97415ac1ca8da4db0c5e05" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.404385 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f"] Sep 29 14:20:37 crc kubenswrapper[4869]: E0929 14:20:37.404799 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4aef40c-231d-4e13-a0b7-c8e65c69ce91" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.404819 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4aef40c-231d-4e13-a0b7-c8e65c69ce91" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.404983 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4aef40c-231d-4e13-a0b7-c8e65c69ce91" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.405663 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.409550 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.410000 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.410078 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.410083 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.410227 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.418153 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f"] Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.528354 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.528444 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.528487 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2f4b\" (UniqueName: \"kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.528623 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.629902 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.630005 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.630088 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.630145 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2f4b\" (UniqueName: \"kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.633917 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.633937 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.634178 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.645984 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2f4b\" (UniqueName: \"kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-chn5f\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:37 crc kubenswrapper[4869]: I0929 14:20:37.726194 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:38 crc kubenswrapper[4869]: I0929 14:20:38.209060 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f"] Sep 29 14:20:38 crc kubenswrapper[4869]: I0929 14:20:38.216404 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:20:38 crc kubenswrapper[4869]: I0929 14:20:38.332195 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" event={"ID":"e5e5263f-9a85-4a85-bfb5-20dea2039fad","Type":"ContainerStarted","Data":"14dc059b35c9641b5708f8af6b3c95eb6a87d905a418a9bec571ad40f49f2240"} Sep 29 14:20:39 crc kubenswrapper[4869]: I0929 14:20:39.342662 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" event={"ID":"e5e5263f-9a85-4a85-bfb5-20dea2039fad","Type":"ContainerStarted","Data":"5c809a490086c1b520fb3e5262373fa35c1bb5311573a108ca2c74388ba14d50"} Sep 29 14:20:39 crc kubenswrapper[4869]: I0929 14:20:39.365386 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" podStartSLOduration=1.879012981 podStartE2EDuration="2.365346556s" podCreationTimestamp="2025-09-29 14:20:37 +0000 UTC" firstStartedPulling="2025-09-29 14:20:38.216184038 +0000 UTC m=+2364.656828358" lastFinishedPulling="2025-09-29 14:20:38.702517613 +0000 UTC m=+2365.143161933" observedRunningTime="2025-09-29 14:20:39.36166499 +0000 UTC m=+2365.802309310" watchObservedRunningTime="2025-09-29 14:20:39.365346556 +0000 UTC m=+2365.805990896" Sep 29 14:20:44 crc kubenswrapper[4869]: I0929 14:20:44.382992 4869 generic.go:334] "Generic (PLEG): container finished" podID="e5e5263f-9a85-4a85-bfb5-20dea2039fad" containerID="5c809a490086c1b520fb3e5262373fa35c1bb5311573a108ca2c74388ba14d50" exitCode=0 Sep 29 14:20:44 crc kubenswrapper[4869]: I0929 14:20:44.383080 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" event={"ID":"e5e5263f-9a85-4a85-bfb5-20dea2039fad","Type":"ContainerDied","Data":"5c809a490086c1b520fb3e5262373fa35c1bb5311573a108ca2c74388ba14d50"} Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.804333 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.896208 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph\") pod \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.896349 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory\") pod \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.896440 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2f4b\" (UniqueName: \"kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b\") pod \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.896519 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key\") pod \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\" (UID: \"e5e5263f-9a85-4a85-bfb5-20dea2039fad\") " Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.903146 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b" (OuterVolumeSpecName: "kube-api-access-j2f4b") pod "e5e5263f-9a85-4a85-bfb5-20dea2039fad" (UID: "e5e5263f-9a85-4a85-bfb5-20dea2039fad"). InnerVolumeSpecName "kube-api-access-j2f4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.903405 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph" (OuterVolumeSpecName: "ceph") pod "e5e5263f-9a85-4a85-bfb5-20dea2039fad" (UID: "e5e5263f-9a85-4a85-bfb5-20dea2039fad"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.929953 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory" (OuterVolumeSpecName: "inventory") pod "e5e5263f-9a85-4a85-bfb5-20dea2039fad" (UID: "e5e5263f-9a85-4a85-bfb5-20dea2039fad"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.930911 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e5e5263f-9a85-4a85-bfb5-20dea2039fad" (UID: "e5e5263f-9a85-4a85-bfb5-20dea2039fad"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.999206 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2f4b\" (UniqueName: \"kubernetes.io/projected/e5e5263f-9a85-4a85-bfb5-20dea2039fad-kube-api-access-j2f4b\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.999247 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.999260 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:45 crc kubenswrapper[4869]: I0929 14:20:45.999270 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5e5263f-9a85-4a85-bfb5-20dea2039fad-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.409774 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" event={"ID":"e5e5263f-9a85-4a85-bfb5-20dea2039fad","Type":"ContainerDied","Data":"14dc059b35c9641b5708f8af6b3c95eb6a87d905a418a9bec571ad40f49f2240"} Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.409821 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14dc059b35c9641b5708f8af6b3c95eb6a87d905a418a9bec571ad40f49f2240" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.409827 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-chn5f" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.476434 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd"] Sep 29 14:20:46 crc kubenswrapper[4869]: E0929 14:20:46.476839 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5e5263f-9a85-4a85-bfb5-20dea2039fad" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.476855 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5e5263f-9a85-4a85-bfb5-20dea2039fad" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.477051 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5e5263f-9a85-4a85-bfb5-20dea2039fad" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.477747 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.481191 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.481388 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.481549 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.482660 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.488190 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd"] Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.489579 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.608765 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w2q2\" (UniqueName: \"kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.609375 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.609507 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.609665 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.711885 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w2q2\" (UniqueName: \"kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.711990 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.712042 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.712110 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.717020 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.717783 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.718262 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.729229 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w2q2\" (UniqueName: \"kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v2crd\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:46 crc kubenswrapper[4869]: I0929 14:20:46.812943 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:20:47 crc kubenswrapper[4869]: I0929 14:20:47.312811 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd"] Sep 29 14:20:47 crc kubenswrapper[4869]: I0929 14:20:47.418170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" event={"ID":"c2d2928f-faba-4b25-90fb-dcaa88a3b515","Type":"ContainerStarted","Data":"f91bd2baad2ad79b89aac8dc1ce7aec43e6b7c5771e8cbb8b793c28d38317d67"} Sep 29 14:20:48 crc kubenswrapper[4869]: I0929 14:20:48.241542 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:20:48 crc kubenswrapper[4869]: E0929 14:20:48.242200 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:20:48 crc kubenswrapper[4869]: I0929 14:20:48.425998 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" event={"ID":"c2d2928f-faba-4b25-90fb-dcaa88a3b515","Type":"ContainerStarted","Data":"520909172eaf63591d92e75ab15f13e44cae14f1b195a11636b8c4ab647c6672"} Sep 29 14:20:48 crc kubenswrapper[4869]: I0929 14:20:48.439264 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" podStartSLOduration=1.748757446 podStartE2EDuration="2.439245457s" podCreationTimestamp="2025-09-29 14:20:46 +0000 UTC" firstStartedPulling="2025-09-29 14:20:47.316107004 +0000 UTC m=+2373.756751314" lastFinishedPulling="2025-09-29 14:20:48.006595015 +0000 UTC m=+2374.447239325" observedRunningTime="2025-09-29 14:20:48.438434636 +0000 UTC m=+2374.879078966" watchObservedRunningTime="2025-09-29 14:20:48.439245457 +0000 UTC m=+2374.879889787" Sep 29 14:21:03 crc kubenswrapper[4869]: I0929 14:21:03.242765 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:21:03 crc kubenswrapper[4869]: E0929 14:21:03.244042 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.199102 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.202454 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.210187 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.304656 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89w7g\" (UniqueName: \"kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.304772 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.304828 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.406795 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89w7g\" (UniqueName: \"kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.406928 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.406973 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.407717 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.407813 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.432740 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-89w7g\" (UniqueName: \"kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g\") pod \"certified-operators-vwdzz\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:10 crc kubenswrapper[4869]: I0929 14:21:10.535811 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:11 crc kubenswrapper[4869]: I0929 14:21:11.073131 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:11 crc kubenswrapper[4869]: W0929 14:21:11.074591 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-f8fd5f9e156b89fc417e57619c4b36e3c639a13d4c85ae7eacd8b0a1dc17c389 WatchSource:0}: Error finding container f8fd5f9e156b89fc417e57619c4b36e3c639a13d4c85ae7eacd8b0a1dc17c389: Status 404 returned error can't find the container with id f8fd5f9e156b89fc417e57619c4b36e3c639a13d4c85ae7eacd8b0a1dc17c389 Sep 29 14:21:11 crc kubenswrapper[4869]: I0929 14:21:11.621635 4869 generic.go:334] "Generic (PLEG): container finished" podID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerID="f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438" exitCode=0 Sep 29 14:21:11 crc kubenswrapper[4869]: I0929 14:21:11.621708 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerDied","Data":"f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438"} Sep 29 14:21:11 crc kubenswrapper[4869]: I0929 14:21:11.621936 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerStarted","Data":"f8fd5f9e156b89fc417e57619c4b36e3c639a13d4c85ae7eacd8b0a1dc17c389"} Sep 29 14:21:14 crc kubenswrapper[4869]: I0929 14:21:14.652634 4869 generic.go:334] "Generic (PLEG): container finished" podID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerID="bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f" exitCode=0 Sep 29 14:21:14 crc kubenswrapper[4869]: I0929 14:21:14.654099 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerDied","Data":"bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f"} Sep 29 14:21:16 crc kubenswrapper[4869]: I0929 14:21:16.242693 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:21:16 crc kubenswrapper[4869]: E0929 14:21:16.243731 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:21:16 crc kubenswrapper[4869]: I0929 14:21:16.673663 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" 
event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerStarted","Data":"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c"} Sep 29 14:21:16 crc kubenswrapper[4869]: I0929 14:21:16.696735 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vwdzz" podStartSLOduration=2.6292862809999997 podStartE2EDuration="6.696714073s" podCreationTimestamp="2025-09-29 14:21:10 +0000 UTC" firstStartedPulling="2025-09-29 14:21:11.625787733 +0000 UTC m=+2398.066432053" lastFinishedPulling="2025-09-29 14:21:15.693215525 +0000 UTC m=+2402.133859845" observedRunningTime="2025-09-29 14:21:16.694403803 +0000 UTC m=+2403.135048123" watchObservedRunningTime="2025-09-29 14:21:16.696714073 +0000 UTC m=+2403.137358413" Sep 29 14:21:20 crc kubenswrapper[4869]: I0929 14:21:20.536687 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:20 crc kubenswrapper[4869]: I0929 14:21:20.537789 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:20 crc kubenswrapper[4869]: I0929 14:21:20.584763 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:20 crc kubenswrapper[4869]: I0929 14:21:20.762100 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:20 crc kubenswrapper[4869]: I0929 14:21:20.821315 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:21 crc kubenswrapper[4869]: E0929 14:21:21.948387 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:21:22 crc kubenswrapper[4869]: I0929 14:21:22.725706 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2d2928f-faba-4b25-90fb-dcaa88a3b515" containerID="520909172eaf63591d92e75ab15f13e44cae14f1b195a11636b8c4ab647c6672" exitCode=0 Sep 29 14:21:22 crc kubenswrapper[4869]: I0929 14:21:22.725801 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" event={"ID":"c2d2928f-faba-4b25-90fb-dcaa88a3b515","Type":"ContainerDied","Data":"520909172eaf63591d92e75ab15f13e44cae14f1b195a11636b8c4ab647c6672"} Sep 29 14:21:22 crc kubenswrapper[4869]: I0929 14:21:22.726187 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vwdzz" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="registry-server" containerID="cri-o://2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c" gracePeriod=2 Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.139953 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.251191 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities\") pod \"12ee57ef-16b3-43d7-81ed-353c5922e253\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.251355 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89w7g\" (UniqueName: \"kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g\") pod \"12ee57ef-16b3-43d7-81ed-353c5922e253\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.251382 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content\") pod \"12ee57ef-16b3-43d7-81ed-353c5922e253\" (UID: \"12ee57ef-16b3-43d7-81ed-353c5922e253\") " Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.252339 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities" (OuterVolumeSpecName: "utilities") pod "12ee57ef-16b3-43d7-81ed-353c5922e253" (UID: "12ee57ef-16b3-43d7-81ed-353c5922e253"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.257405 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g" (OuterVolumeSpecName: "kube-api-access-89w7g") pod "12ee57ef-16b3-43d7-81ed-353c5922e253" (UID: "12ee57ef-16b3-43d7-81ed-353c5922e253"). InnerVolumeSpecName "kube-api-access-89w7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.306339 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12ee57ef-16b3-43d7-81ed-353c5922e253" (UID: "12ee57ef-16b3-43d7-81ed-353c5922e253"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.353438 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89w7g\" (UniqueName: \"kubernetes.io/projected/12ee57ef-16b3-43d7-81ed-353c5922e253-kube-api-access-89w7g\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.353487 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.353497 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12ee57ef-16b3-43d7-81ed-353c5922e253-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.738123 4869 generic.go:334] "Generic (PLEG): container finished" podID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerID="2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c" exitCode=0 Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.738208 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwdzz" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.738198 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerDied","Data":"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c"} Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.738361 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwdzz" event={"ID":"12ee57ef-16b3-43d7-81ed-353c5922e253","Type":"ContainerDied","Data":"f8fd5f9e156b89fc417e57619c4b36e3c639a13d4c85ae7eacd8b0a1dc17c389"} Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.738408 4869 scope.go:117] "RemoveContainer" containerID="2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.777054 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.781289 4869 scope.go:117] "RemoveContainer" containerID="bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.785862 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vwdzz"] Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.807940 4869 scope.go:117] "RemoveContainer" containerID="f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.858254 4869 scope.go:117] "RemoveContainer" containerID="2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c" Sep 29 14:21:23 crc kubenswrapper[4869]: E0929 14:21:23.858718 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c\": container with ID starting with 2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c not found: ID does not exist" containerID="2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.858749 
4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c"} err="failed to get container status \"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c\": rpc error: code = NotFound desc = could not find container \"2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c\": container with ID starting with 2aefbd9da1f9fc50cc867c3109d9508482d3183cc2bfde360ce18246d07b825c not found: ID does not exist" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.858770 4869 scope.go:117] "RemoveContainer" containerID="bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f" Sep 29 14:21:23 crc kubenswrapper[4869]: E0929 14:21:23.859246 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f\": container with ID starting with bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f not found: ID does not exist" containerID="bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.859282 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f"} err="failed to get container status \"bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f\": rpc error: code = NotFound desc = could not find container \"bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f\": container with ID starting with bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f not found: ID does not exist" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.859309 4869 scope.go:117] "RemoveContainer" containerID="f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438" Sep 29 14:21:23 crc kubenswrapper[4869]: E0929 14:21:23.859739 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438\": container with ID starting with f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438 not found: ID does not exist" containerID="f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438" Sep 29 14:21:23 crc kubenswrapper[4869]: I0929 14:21:23.859765 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438"} err="failed to get container status \"f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438\": rpc error: code = NotFound desc = could not find container \"f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438\": container with ID starting with f64ba39ba0b89260feb5eabc550d0c58b3205cb1c9e5c29da599d4666a226438 not found: ID does not exist" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.109193 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.167899 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph\") pod \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.168917 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory\") pod \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.169005 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w2q2\" (UniqueName: \"kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2\") pod \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.169226 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key\") pod \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\" (UID: \"c2d2928f-faba-4b25-90fb-dcaa88a3b515\") " Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.171935 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2" (OuterVolumeSpecName: "kube-api-access-4w2q2") pod "c2d2928f-faba-4b25-90fb-dcaa88a3b515" (UID: "c2d2928f-faba-4b25-90fb-dcaa88a3b515"). InnerVolumeSpecName "kube-api-access-4w2q2". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.172731 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph" (OuterVolumeSpecName: "ceph") pod "c2d2928f-faba-4b25-90fb-dcaa88a3b515" (UID: "c2d2928f-faba-4b25-90fb-dcaa88a3b515"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.198163 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c2d2928f-faba-4b25-90fb-dcaa88a3b515" (UID: "c2d2928f-faba-4b25-90fb-dcaa88a3b515"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.200534 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory" (OuterVolumeSpecName: "inventory") pod "c2d2928f-faba-4b25-90fb-dcaa88a3b515" (UID: "c2d2928f-faba-4b25-90fb-dcaa88a3b515"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.274140 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.274175 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w2q2\" (UniqueName: \"kubernetes.io/projected/c2d2928f-faba-4b25-90fb-dcaa88a3b515-kube-api-access-4w2q2\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.274187 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.274196 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2d2928f-faba-4b25-90fb-dcaa88a3b515-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.278602 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" path="/var/lib/kubelet/pods/12ee57ef-16b3-43d7-81ed-353c5922e253/volumes" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.748355 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" event={"ID":"c2d2928f-faba-4b25-90fb-dcaa88a3b515","Type":"ContainerDied","Data":"f91bd2baad2ad79b89aac8dc1ce7aec43e6b7c5771e8cbb8b793c28d38317d67"} Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.748399 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f91bd2baad2ad79b89aac8dc1ce7aec43e6b7c5771e8cbb8b793c28d38317d67" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.748478 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v2crd" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.820354 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv"] Sep 29 14:21:24 crc kubenswrapper[4869]: E0929 14:21:24.820800 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="registry-server" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.820818 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="registry-server" Sep 29 14:21:24 crc kubenswrapper[4869]: E0929 14:21:24.820844 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="extract-content" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.820852 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="extract-content" Sep 29 14:21:24 crc kubenswrapper[4869]: E0929 14:21:24.820876 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="extract-utilities" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.820882 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="extract-utilities" Sep 29 14:21:24 crc kubenswrapper[4869]: E0929 14:21:24.820894 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d2928f-faba-4b25-90fb-dcaa88a3b515" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.820901 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d2928f-faba-4b25-90fb-dcaa88a3b515" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.821068 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="12ee57ef-16b3-43d7-81ed-353c5922e253" containerName="registry-server" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.821087 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d2928f-faba-4b25-90fb-dcaa88a3b515" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.821838 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.825324 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.827455 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.827517 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.827522 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.829554 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.831990 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv"] Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.885746 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.885856 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.885991 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t8c6\" (UniqueName: \"kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.886026 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.988140 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t8c6\" (UniqueName: \"kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.988188 4869 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.988217 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.988298 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.992735 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.992831 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:24 crc kubenswrapper[4869]: I0929 14:21:24.993070 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:25 crc kubenswrapper[4869]: I0929 14:21:25.005491 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t8c6\" (UniqueName: \"kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:25 crc kubenswrapper[4869]: I0929 14:21:25.137384 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:25 crc kubenswrapper[4869]: I0929 14:21:25.608689 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv"] Sep 29 14:21:25 crc kubenswrapper[4869]: I0929 14:21:25.759598 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" event={"ID":"975110b8-8030-4279-9b45-03a9523a9012","Type":"ContainerStarted","Data":"8fdcf00fc017db755a7b7af88918c6b051537cc083c2132c4821c4bdfe2a1a99"} Sep 29 14:21:26 crc kubenswrapper[4869]: I0929 14:21:26.768179 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" event={"ID":"975110b8-8030-4279-9b45-03a9523a9012","Type":"ContainerStarted","Data":"1e95af10a5841e24a637379223a48f25023f7a95fdb3f509b9d8861ccb51b0c4"} Sep 29 14:21:26 crc kubenswrapper[4869]: I0929 14:21:26.785510 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" podStartSLOduration=2.330404933 podStartE2EDuration="2.785492497s" podCreationTimestamp="2025-09-29 14:21:24 +0000 UTC" firstStartedPulling="2025-09-29 14:21:25.611357022 +0000 UTC m=+2412.052001342" lastFinishedPulling="2025-09-29 14:21:26.066444586 +0000 UTC m=+2412.507088906" observedRunningTime="2025-09-29 14:21:26.784816789 +0000 UTC m=+2413.225461109" watchObservedRunningTime="2025-09-29 14:21:26.785492497 +0000 UTC m=+2413.226136817" Sep 29 14:21:29 crc kubenswrapper[4869]: I0929 14:21:29.242518 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:21:29 crc kubenswrapper[4869]: E0929 14:21:29.243201 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:21:30 crc kubenswrapper[4869]: I0929 14:21:30.804162 4869 generic.go:334] "Generic (PLEG): container finished" podID="975110b8-8030-4279-9b45-03a9523a9012" containerID="1e95af10a5841e24a637379223a48f25023f7a95fdb3f509b9d8861ccb51b0c4" exitCode=0 Sep 29 14:21:30 crc kubenswrapper[4869]: I0929 14:21:30.804254 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" event={"ID":"975110b8-8030-4279-9b45-03a9523a9012","Type":"ContainerDied","Data":"1e95af10a5841e24a637379223a48f25023f7a95fdb3f509b9d8861ccb51b0c4"} Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.201995 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:32 crc kubenswrapper[4869]: E0929 14:21:32.204967 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.331565 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key\") pod \"975110b8-8030-4279-9b45-03a9523a9012\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.331671 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory\") pod \"975110b8-8030-4279-9b45-03a9523a9012\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.331702 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph\") pod \"975110b8-8030-4279-9b45-03a9523a9012\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.331749 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t8c6\" (UniqueName: \"kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6\") pod \"975110b8-8030-4279-9b45-03a9523a9012\" (UID: \"975110b8-8030-4279-9b45-03a9523a9012\") " Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.337508 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph" (OuterVolumeSpecName: "ceph") pod "975110b8-8030-4279-9b45-03a9523a9012" (UID: "975110b8-8030-4279-9b45-03a9523a9012"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.337542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6" (OuterVolumeSpecName: "kube-api-access-4t8c6") pod "975110b8-8030-4279-9b45-03a9523a9012" (UID: "975110b8-8030-4279-9b45-03a9523a9012"). InnerVolumeSpecName "kube-api-access-4t8c6". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.361939 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "975110b8-8030-4279-9b45-03a9523a9012" (UID: "975110b8-8030-4279-9b45-03a9523a9012"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.362334 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory" (OuterVolumeSpecName: "inventory") pod "975110b8-8030-4279-9b45-03a9523a9012" (UID: "975110b8-8030-4279-9b45-03a9523a9012"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.433831 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.433868 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.433876 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/975110b8-8030-4279-9b45-03a9523a9012-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.433885 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t8c6\" (UniqueName: \"kubernetes.io/projected/975110b8-8030-4279-9b45-03a9523a9012-kube-api-access-4t8c6\") on node \"crc\" DevicePath \"\"" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.823871 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" event={"ID":"975110b8-8030-4279-9b45-03a9523a9012","Type":"ContainerDied","Data":"8fdcf00fc017db755a7b7af88918c6b051537cc083c2132c4821c4bdfe2a1a99"} Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.823912 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fdcf00fc017db755a7b7af88918c6b051537cc083c2132c4821c4bdfe2a1a99" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.824027 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.909522 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j"] Sep 29 14:21:32 crc kubenswrapper[4869]: E0929 14:21:32.910059 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975110b8-8030-4279-9b45-03a9523a9012" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.910089 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="975110b8-8030-4279-9b45-03a9523a9012" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.910312 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="975110b8-8030-4279-9b45-03a9523a9012" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.911164 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.918498 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.918781 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.918958 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.919085 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.919222 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.921311 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j"] Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.941480 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtsvr\" (UniqueName: \"kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.941534 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.941570 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:32 crc kubenswrapper[4869]: I0929 14:21:32.941652 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.043461 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtsvr\" (UniqueName: \"kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.043549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.043599 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.043703 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.048991 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.049099 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.051463 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.062493 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtsvr\" (UniqueName: \"kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.233412 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.718080 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j"] Sep 29 14:21:33 crc kubenswrapper[4869]: I0929 14:21:33.832916 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" event={"ID":"5705d408-f08f-47cf-b786-733c2f6f55e2","Type":"ContainerStarted","Data":"f2d5e1a72a05d2f6d6f5d59856cf08c1ded812093de7c02e613677bb08274404"} Sep 29 14:21:34 crc kubenswrapper[4869]: I0929 14:21:34.844228 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" event={"ID":"5705d408-f08f-47cf-b786-733c2f6f55e2","Type":"ContainerStarted","Data":"345e9a82f6d4ebd5171035eb585e4e476e0050d0f2ab2d852f7de990cebd3bd5"} Sep 29 14:21:41 crc kubenswrapper[4869]: I0929 14:21:41.242037 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:21:41 crc kubenswrapper[4869]: E0929 14:21:41.242851 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:21:42 crc kubenswrapper[4869]: E0929 14:21:42.443224 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:21:52 crc kubenswrapper[4869]: E0929 14:21:52.698447 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:21:54 crc kubenswrapper[4869]: I0929 14:21:54.247786 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:21:54 crc kubenswrapper[4869]: E0929 14:21:54.248328 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:02 crc kubenswrapper[4869]: E0929 14:22:02.940688 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 
29 14:22:07 crc kubenswrapper[4869]: I0929 14:22:07.241638 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:22:07 crc kubenswrapper[4869]: E0929 14:22:07.242327 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:13 crc kubenswrapper[4869]: E0929 14:22:13.173747 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12ee57ef_16b3_43d7_81ed_353c5922e253.slice/crio-bb2d5c407c48b802d0e8cc505a2552390403118f68b415580fb63f5fd058be8f.scope\": RecentStats: unable to find data in memory cache]" Sep 29 14:22:17 crc kubenswrapper[4869]: I0929 14:22:17.219696 4869 generic.go:334] "Generic (PLEG): container finished" podID="5705d408-f08f-47cf-b786-733c2f6f55e2" containerID="345e9a82f6d4ebd5171035eb585e4e476e0050d0f2ab2d852f7de990cebd3bd5" exitCode=0 Sep 29 14:22:17 crc kubenswrapper[4869]: I0929 14:22:17.219880 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" event={"ID":"5705d408-f08f-47cf-b786-733c2f6f55e2","Type":"ContainerDied","Data":"345e9a82f6d4ebd5171035eb585e4e476e0050d0f2ab2d852f7de990cebd3bd5"} Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.627747 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.740346 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph\") pod \"5705d408-f08f-47cf-b786-733c2f6f55e2\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.740476 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key\") pod \"5705d408-f08f-47cf-b786-733c2f6f55e2\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.740671 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtsvr\" (UniqueName: \"kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr\") pod \"5705d408-f08f-47cf-b786-733c2f6f55e2\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.740779 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory\") pod \"5705d408-f08f-47cf-b786-733c2f6f55e2\" (UID: \"5705d408-f08f-47cf-b786-733c2f6f55e2\") " Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.748025 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph" (OuterVolumeSpecName: "ceph") pod "5705d408-f08f-47cf-b786-733c2f6f55e2" (UID: "5705d408-f08f-47cf-b786-733c2f6f55e2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.748085 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr" (OuterVolumeSpecName: "kube-api-access-xtsvr") pod "5705d408-f08f-47cf-b786-733c2f6f55e2" (UID: "5705d408-f08f-47cf-b786-733c2f6f55e2"). InnerVolumeSpecName "kube-api-access-xtsvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.766496 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory" (OuterVolumeSpecName: "inventory") pod "5705d408-f08f-47cf-b786-733c2f6f55e2" (UID: "5705d408-f08f-47cf-b786-733c2f6f55e2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.767362 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5705d408-f08f-47cf-b786-733c2f6f55e2" (UID: "5705d408-f08f-47cf-b786-733c2f6f55e2"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.842947 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtsvr\" (UniqueName: \"kubernetes.io/projected/5705d408-f08f-47cf-b786-733c2f6f55e2-kube-api-access-xtsvr\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.843258 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.843319 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:18 crc kubenswrapper[4869]: I0929 14:22:18.843380 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5705d408-f08f-47cf-b786-733c2f6f55e2-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.236649 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" event={"ID":"5705d408-f08f-47cf-b786-733c2f6f55e2","Type":"ContainerDied","Data":"f2d5e1a72a05d2f6d6f5d59856cf08c1ded812093de7c02e613677bb08274404"} Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.236697 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2d5e1a72a05d2f6d6f5d59856cf08c1ded812093de7c02e613677bb08274404" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.236775 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.338403 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gv9vj"] Sep 29 14:22:19 crc kubenswrapper[4869]: E0929 14:22:19.339216 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5705d408-f08f-47cf-b786-733c2f6f55e2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.339257 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="5705d408-f08f-47cf-b786-733c2f6f55e2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.339695 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="5705d408-f08f-47cf-b786-733c2f6f55e2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.341328 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.345025 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.345224 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.345329 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.345143 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.345204 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.350452 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gv9vj"] Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.455258 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.455544 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.456076 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.456314 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blbgb\" (UniqueName: \"kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.558914 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.558998 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blbgb\" (UniqueName: \"kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb\") pod 
\"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.559063 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.559143 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.564817 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.564872 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.569379 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.582857 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blbgb\" (UniqueName: \"kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb\") pod \"ssh-known-hosts-edpm-deployment-gv9vj\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") " pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:19 crc kubenswrapper[4869]: I0929 14:22:19.669190 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:20 crc kubenswrapper[4869]: I0929 14:22:20.197548 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gv9vj"] Sep 29 14:22:20 crc kubenswrapper[4869]: I0929 14:22:20.242376 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:22:20 crc kubenswrapper[4869]: E0929 14:22:20.242598 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:20 crc kubenswrapper[4869]: I0929 14:22:20.252695 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" event={"ID":"bebe6640-f31f-4487-9e19-8a829719abfd","Type":"ContainerStarted","Data":"f85654ac852b4050a3134f97e7e12d317d5fbb136d5f5a89b40cd9f2d5d3f2b9"} Sep 29 14:22:21 crc kubenswrapper[4869]: I0929 14:22:21.262894 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" event={"ID":"bebe6640-f31f-4487-9e19-8a829719abfd","Type":"ContainerStarted","Data":"412a45dc307c021b70455753679a9acb94860182d13a60d9fb0967ab80142a62"} Sep 29 14:22:21 crc kubenswrapper[4869]: I0929 14:22:21.279828 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" podStartSLOduration=1.63836642 podStartE2EDuration="2.279808018s" podCreationTimestamp="2025-09-29 14:22:19 +0000 UTC" firstStartedPulling="2025-09-29 14:22:20.212128184 +0000 UTC m=+2466.652772504" lastFinishedPulling="2025-09-29 14:22:20.853569782 +0000 UTC m=+2467.294214102" observedRunningTime="2025-09-29 14:22:21.276857062 +0000 UTC m=+2467.717501392" watchObservedRunningTime="2025-09-29 14:22:21.279808018 +0000 UTC m=+2467.720452338" Sep 29 14:22:30 crc kubenswrapper[4869]: I0929 14:22:30.340503 4869 generic.go:334] "Generic (PLEG): container finished" podID="bebe6640-f31f-4487-9e19-8a829719abfd" containerID="412a45dc307c021b70455753679a9acb94860182d13a60d9fb0967ab80142a62" exitCode=0 Sep 29 14:22:30 crc kubenswrapper[4869]: I0929 14:22:30.340642 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" event={"ID":"bebe6640-f31f-4487-9e19-8a829719abfd","Type":"ContainerDied","Data":"412a45dc307c021b70455753679a9acb94860182d13a60d9fb0967ab80142a62"} Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.243159 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:22:31 crc kubenswrapper[4869]: E0929 14:22:31.243408 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.751208 4869 util.go:48] "No ready sandbox 
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.751208 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj"
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.800942 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph\") pod \"bebe6640-f31f-4487-9e19-8a829719abfd\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") "
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.801257 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blbgb\" (UniqueName: \"kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb\") pod \"bebe6640-f31f-4487-9e19-8a829719abfd\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") "
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.801504 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam\") pod \"bebe6640-f31f-4487-9e19-8a829719abfd\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") "
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.802213 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-inventory-0\") pod \"bebe6640-f31f-4487-9e19-8a829719abfd\" (UID: \"bebe6640-f31f-4487-9e19-8a829719abfd\") "
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.807883 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph" (OuterVolumeSpecName: "ceph") pod "bebe6640-f31f-4487-9e19-8a829719abfd" (UID: "bebe6640-f31f-4487-9e19-8a829719abfd"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.808027 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb" (OuterVolumeSpecName: "kube-api-access-blbgb") pod "bebe6640-f31f-4487-9e19-8a829719abfd" (UID: "bebe6640-f31f-4487-9e19-8a829719abfd"). InnerVolumeSpecName "kube-api-access-blbgb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.828194 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bebe6640-f31f-4487-9e19-8a829719abfd" (UID: "bebe6640-f31f-4487-9e19-8a829719abfd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.904820 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.904864 4869 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-inventory-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.904876 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bebe6640-f31f-4487-9e19-8a829719abfd-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:31 crc kubenswrapper[4869]: I0929 14:22:31.904885 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blbgb\" (UniqueName: \"kubernetes.io/projected/bebe6640-f31f-4487-9e19-8a829719abfd-kube-api-access-blbgb\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.365802 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" event={"ID":"bebe6640-f31f-4487-9e19-8a829719abfd","Type":"ContainerDied","Data":"f85654ac852b4050a3134f97e7e12d317d5fbb136d5f5a89b40cd9f2d5d3f2b9"} Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.365888 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f85654ac852b4050a3134f97e7e12d317d5fbb136d5f5a89b40cd9f2d5d3f2b9" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.365893 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gv9vj" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.432593 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5"] Sep 29 14:22:32 crc kubenswrapper[4869]: E0929 14:22:32.433044 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bebe6640-f31f-4487-9e19-8a829719abfd" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.433059 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="bebe6640-f31f-4487-9e19-8a829719abfd" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.433219 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="bebe6640-f31f-4487-9e19-8a829719abfd" containerName="ssh-known-hosts-edpm-deployment" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.433925 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.436957 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.437223 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.437437 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.437592 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.437753 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.456737 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5"] Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.517109 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.517265 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.517293 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.517701 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnpk8\" (UniqueName: \"kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.620281 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.620384 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key\") 
pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.620410 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.620504 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnpk8\" (UniqueName: \"kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.626183 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.626772 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.627489 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.640540 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnpk8\" (UniqueName: \"kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v56q5\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:32 crc kubenswrapper[4869]: I0929 14:22:32.755449 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:33 crc kubenswrapper[4869]: I0929 14:22:33.332114 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5"] Sep 29 14:22:33 crc kubenswrapper[4869]: I0929 14:22:33.375464 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" event={"ID":"6e87d90c-73d6-4e83-87b9-09dc55557160","Type":"ContainerStarted","Data":"37760050730f3e0093740ae90fe2214c548240ca9228fea60bf5e6dd0809477c"} Sep 29 14:22:35 crc kubenswrapper[4869]: I0929 14:22:35.392653 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" event={"ID":"6e87d90c-73d6-4e83-87b9-09dc55557160","Type":"ContainerStarted","Data":"2c2b365bb69ba0a813a18a7fc3947f59a6b01398a4d9c928dcf51dfcc9175c70"} Sep 29 14:22:35 crc kubenswrapper[4869]: I0929 14:22:35.414153 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" podStartSLOduration=2.042855848 podStartE2EDuration="3.414130226s" podCreationTimestamp="2025-09-29 14:22:32 +0000 UTC" firstStartedPulling="2025-09-29 14:22:33.33931906 +0000 UTC m=+2479.779963400" lastFinishedPulling="2025-09-29 14:22:34.710593458 +0000 UTC m=+2481.151237778" observedRunningTime="2025-09-29 14:22:35.411136389 +0000 UTC m=+2481.851780709" watchObservedRunningTime="2025-09-29 14:22:35.414130226 +0000 UTC m=+2481.854774546" Sep 29 14:22:42 crc kubenswrapper[4869]: I0929 14:22:42.454970 4869 generic.go:334] "Generic (PLEG): container finished" podID="6e87d90c-73d6-4e83-87b9-09dc55557160" containerID="2c2b365bb69ba0a813a18a7fc3947f59a6b01398a4d9c928dcf51dfcc9175c70" exitCode=0 Sep 29 14:22:42 crc kubenswrapper[4869]: I0929 14:22:42.455018 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" event={"ID":"6e87d90c-73d6-4e83-87b9-09dc55557160","Type":"ContainerDied","Data":"2c2b365bb69ba0a813a18a7fc3947f59a6b01398a4d9c928dcf51dfcc9175c70"} Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.860481 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.925258 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnpk8\" (UniqueName: \"kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8\") pod \"6e87d90c-73d6-4e83-87b9-09dc55557160\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.926284 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph\") pod \"6e87d90c-73d6-4e83-87b9-09dc55557160\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.926376 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key\") pod \"6e87d90c-73d6-4e83-87b9-09dc55557160\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.926420 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory\") pod \"6e87d90c-73d6-4e83-87b9-09dc55557160\" (UID: \"6e87d90c-73d6-4e83-87b9-09dc55557160\") " Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.931703 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8" (OuterVolumeSpecName: "kube-api-access-qnpk8") pod "6e87d90c-73d6-4e83-87b9-09dc55557160" (UID: "6e87d90c-73d6-4e83-87b9-09dc55557160"). InnerVolumeSpecName "kube-api-access-qnpk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.932980 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph" (OuterVolumeSpecName: "ceph") pod "6e87d90c-73d6-4e83-87b9-09dc55557160" (UID: "6e87d90c-73d6-4e83-87b9-09dc55557160"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.956426 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6e87d90c-73d6-4e83-87b9-09dc55557160" (UID: "6e87d90c-73d6-4e83-87b9-09dc55557160"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:43 crc kubenswrapper[4869]: I0929 14:22:43.959646 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory" (OuterVolumeSpecName: "inventory") pod "6e87d90c-73d6-4e83-87b9-09dc55557160" (UID: "6e87d90c-73d6-4e83-87b9-09dc55557160"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.028666 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.029083 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnpk8\" (UniqueName: \"kubernetes.io/projected/6e87d90c-73d6-4e83-87b9-09dc55557160-kube-api-access-qnpk8\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.029179 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.029254 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6e87d90c-73d6-4e83-87b9-09dc55557160-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.473540 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" event={"ID":"6e87d90c-73d6-4e83-87b9-09dc55557160","Type":"ContainerDied","Data":"37760050730f3e0093740ae90fe2214c548240ca9228fea60bf5e6dd0809477c"} Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.473824 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37760050730f3e0093740ae90fe2214c548240ca9228fea60bf5e6dd0809477c" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.473623 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v56q5" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.546044 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg"] Sep 29 14:22:44 crc kubenswrapper[4869]: E0929 14:22:44.546476 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e87d90c-73d6-4e83-87b9-09dc55557160" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.546499 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e87d90c-73d6-4e83-87b9-09dc55557160" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.546750 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e87d90c-73d6-4e83-87b9-09dc55557160" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.547371 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.549982 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.550027 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.550416 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.550516 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.551346 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.557726 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg"] Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.640925 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxdln\" (UniqueName: \"kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.641275 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.641363 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.641459 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.742426 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.742568 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.742655 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.742697 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxdln\" (UniqueName: \"kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.747815 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.749142 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.756036 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.762580 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxdln\" (UniqueName: \"kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:44 crc kubenswrapper[4869]: I0929 14:22:44.864112 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:45 crc kubenswrapper[4869]: I0929 14:22:45.242113 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:22:45 crc kubenswrapper[4869]: E0929 14:22:45.242380 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:45 crc kubenswrapper[4869]: I0929 14:22:45.353369 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg"] Sep 29 14:22:45 crc kubenswrapper[4869]: I0929 14:22:45.483236 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" event={"ID":"aee823b2-67f9-4014-8015-f9094f636f20","Type":"ContainerStarted","Data":"87a1ba62ee931ee4f4febe91932aacaf1bb29e433284fcd752d2decbea9b137c"} Sep 29 14:22:46 crc kubenswrapper[4869]: I0929 14:22:46.503712 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" event={"ID":"aee823b2-67f9-4014-8015-f9094f636f20","Type":"ContainerStarted","Data":"4b0fbe6ea58304bf702b6258b400fa6d37a0f2888ca930c3204cd8282873d727"} Sep 29 14:22:46 crc kubenswrapper[4869]: I0929 14:22:46.521956 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" podStartSLOduration=2.077762092 podStartE2EDuration="2.521933163s" podCreationTimestamp="2025-09-29 14:22:44 +0000 UTC" firstStartedPulling="2025-09-29 14:22:45.361656278 +0000 UTC m=+2491.802300598" lastFinishedPulling="2025-09-29 14:22:45.805827349 +0000 UTC m=+2492.246471669" observedRunningTime="2025-09-29 14:22:46.520087895 +0000 UTC m=+2492.960732215" watchObservedRunningTime="2025-09-29 14:22:46.521933163 +0000 UTC m=+2492.962577483" Sep 29 14:22:55 crc kubenswrapper[4869]: I0929 14:22:55.582925 4869 generic.go:334] "Generic (PLEG): container finished" podID="aee823b2-67f9-4014-8015-f9094f636f20" containerID="4b0fbe6ea58304bf702b6258b400fa6d37a0f2888ca930c3204cd8282873d727" exitCode=0 Sep 29 14:22:55 crc kubenswrapper[4869]: I0929 14:22:55.583010 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" event={"ID":"aee823b2-67f9-4014-8015-f9094f636f20","Type":"ContainerDied","Data":"4b0fbe6ea58304bf702b6258b400fa6d37a0f2888ca930c3204cd8282873d727"} Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.031469 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.110773 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxdln\" (UniqueName: \"kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln\") pod \"aee823b2-67f9-4014-8015-f9094f636f20\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.110879 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph\") pod \"aee823b2-67f9-4014-8015-f9094f636f20\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.110934 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory\") pod \"aee823b2-67f9-4014-8015-f9094f636f20\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.111128 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key\") pod \"aee823b2-67f9-4014-8015-f9094f636f20\" (UID: \"aee823b2-67f9-4014-8015-f9094f636f20\") " Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.118393 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln" (OuterVolumeSpecName: "kube-api-access-kxdln") pod "aee823b2-67f9-4014-8015-f9094f636f20" (UID: "aee823b2-67f9-4014-8015-f9094f636f20"). InnerVolumeSpecName "kube-api-access-kxdln". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.118995 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph" (OuterVolumeSpecName: "ceph") pod "aee823b2-67f9-4014-8015-f9094f636f20" (UID: "aee823b2-67f9-4014-8015-f9094f636f20"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.148068 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory" (OuterVolumeSpecName: "inventory") pod "aee823b2-67f9-4014-8015-f9094f636f20" (UID: "aee823b2-67f9-4014-8015-f9094f636f20"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.157155 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aee823b2-67f9-4014-8015-f9094f636f20" (UID: "aee823b2-67f9-4014-8015-f9094f636f20"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.213770 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.213813 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.213825 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aee823b2-67f9-4014-8015-f9094f636f20-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.213836 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxdln\" (UniqueName: \"kubernetes.io/projected/aee823b2-67f9-4014-8015-f9094f636f20-kube-api-access-kxdln\") on node \"crc\" DevicePath \"\"" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.243101 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:22:57 crc kubenswrapper[4869]: E0929 14:22:57.243450 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.602913 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" event={"ID":"aee823b2-67f9-4014-8015-f9094f636f20","Type":"ContainerDied","Data":"87a1ba62ee931ee4f4febe91932aacaf1bb29e433284fcd752d2decbea9b137c"} Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.602974 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87a1ba62ee931ee4f4febe91932aacaf1bb29e433284fcd752d2decbea9b137c" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.602939 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.734235 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b"] Sep 29 14:22:57 crc kubenswrapper[4869]: E0929 14:22:57.734718 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee823b2-67f9-4014-8015-f9094f636f20" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.734745 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee823b2-67f9-4014-8015-f9094f636f20" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.735012 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="aee823b2-67f9-4014-8015-f9094f636f20" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.735871 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.738362 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.739456 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.739596 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.739787 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.739966 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.740805 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.741030 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.741172 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.741318 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.753792 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b"] Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827440 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827488 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827528 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l25s\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827551 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827582 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.827966 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828166 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828214 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828459 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828584 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828642 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828807 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828872 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828905 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.828945 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.930541 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931160 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931212 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931242 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931325 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931369 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931402 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931484 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931524 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931557 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931587 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931714 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931757 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931807 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l25s\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.931836 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.937118 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.938265 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.938455 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 
crc kubenswrapper[4869]: I0929 14:22:57.941173 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.941980 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.942026 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.942209 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.942590 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.944403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.945089 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.945295 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.946850 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.947786 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.947990 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:57 crc kubenswrapper[4869]: I0929 14:22:57.957865 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l25s\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-htp7b\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:58 crc kubenswrapper[4869]: I0929 14:22:58.057442 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:22:58 crc kubenswrapper[4869]: I0929 14:22:58.622405 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b"] Sep 29 14:22:59 crc kubenswrapper[4869]: I0929 14:22:59.624942 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" event={"ID":"9f5b7d60-1238-45f5-a03f-28ede1b33ce0","Type":"ContainerStarted","Data":"39e568c1b6d6b53fe01eb1aaeb6914ce05684a2cf669e630da46193fbe247634"} Sep 29 14:22:59 crc kubenswrapper[4869]: I0929 14:22:59.625319 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" event={"ID":"9f5b7d60-1238-45f5-a03f-28ede1b33ce0","Type":"ContainerStarted","Data":"b79b84c8cec1bc13fa3bf0e1fa5a11bf70ea6acd627d7e9595572fa22bfb8625"} Sep 29 14:22:59 crc kubenswrapper[4869]: I0929 14:22:59.654969 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" podStartSLOduration=2.016325074 podStartE2EDuration="2.654926009s" podCreationTimestamp="2025-09-29 14:22:57 +0000 UTC" firstStartedPulling="2025-09-29 14:22:58.640768083 +0000 UTC m=+2505.081412403" lastFinishedPulling="2025-09-29 14:22:59.279369018 +0000 UTC m=+2505.720013338" observedRunningTime="2025-09-29 14:22:59.645525145 +0000 UTC m=+2506.086169465" watchObservedRunningTime="2025-09-29 14:22:59.654926009 +0000 UTC m=+2506.095570329" Sep 29 14:23:08 crc kubenswrapper[4869]: I0929 14:23:08.241989 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:23:08 crc kubenswrapper[4869]: E0929 14:23:08.243237 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:23:19 crc kubenswrapper[4869]: I0929 14:23:19.242748 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:23:19 crc kubenswrapper[4869]: E0929 14:23:19.244232 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:23:30 crc kubenswrapper[4869]: I0929 14:23:30.242161 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:23:30 crc kubenswrapper[4869]: E0929 14:23:30.243138 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:23:36 crc kubenswrapper[4869]: I0929 14:23:36.001214 4869 generic.go:334] "Generic (PLEG): container finished" podID="9f5b7d60-1238-45f5-a03f-28ede1b33ce0" containerID="39e568c1b6d6b53fe01eb1aaeb6914ce05684a2cf669e630da46193fbe247634" exitCode=0 Sep 29 14:23:36 crc kubenswrapper[4869]: I0929 14:23:36.001321 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" event={"ID":"9f5b7d60-1238-45f5-a03f-28ede1b33ce0","Type":"ContainerDied","Data":"39e568c1b6d6b53fe01eb1aaeb6914ce05684a2cf669e630da46193fbe247634"} Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.430965 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624320 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624390 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8l25s\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624416 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624461 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624483 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624530 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624557 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: 
\"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624582 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624605 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624677 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624702 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624725 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624767 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624819 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.624889 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle\") pod \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\" (UID: \"9f5b7d60-1238-45f5-a03f-28ede1b33ce0\") " Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.631519 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.631794 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.632652 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s" (OuterVolumeSpecName: "kube-api-access-8l25s") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "kube-api-access-8l25s". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.632721 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.633003 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.633081 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.633110 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.633081 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.635508 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.636171 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.636753 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.650280 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph" (OuterVolumeSpecName: "ceph") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.651515 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.656840 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory" (OuterVolumeSpecName: "inventory") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.659460 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9f5b7d60-1238-45f5-a03f-28ede1b33ce0" (UID: "9f5b7d60-1238-45f5-a03f-28ede1b33ce0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.727962 4869 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.727996 4869 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728009 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728019 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728033 4869 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728044 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728058 4869 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728067 4869 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728075 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728085 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728094 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728105 4869 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-libvirt-combined-ca-bundle\") on node \"crc\" 
DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728113 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728122 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8l25s\" (UniqueName: \"kubernetes.io/projected/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-kube-api-access-8l25s\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:37 crc kubenswrapper[4869]: I0929 14:23:37.728131 4869 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5b7d60-1238-45f5-a03f-28ede1b33ce0-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.023931 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" event={"ID":"9f5b7d60-1238-45f5-a03f-28ede1b33ce0","Type":"ContainerDied","Data":"b79b84c8cec1bc13fa3bf0e1fa5a11bf70ea6acd627d7e9595572fa22bfb8625"} Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.023974 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b79b84c8cec1bc13fa3bf0e1fa5a11bf70ea6acd627d7e9595572fa22bfb8625" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.024275 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-htp7b" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.117739 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw"] Sep 29 14:23:38 crc kubenswrapper[4869]: E0929 14:23:38.118517 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f5b7d60-1238-45f5-a03f-28ede1b33ce0" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.118642 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f5b7d60-1238-45f5-a03f-28ede1b33ce0" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.118967 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f5b7d60-1238-45f5-a03f-28ede1b33ce0" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.120032 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.124078 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.124124 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.124244 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.124324 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.124097 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.128120 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw"] Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.164571 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd9t7\" (UniqueName: \"kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.165025 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.165179 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.165349 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.268284 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd9t7\" (UniqueName: \"kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.268495 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.268652 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.268818 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.272729 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.272823 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.275069 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.293786 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd9t7\" (UniqueName: \"kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:38 crc kubenswrapper[4869]: I0929 14:23:38.502852 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:39 crc kubenswrapper[4869]: I0929 14:23:39.058282 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw"] Sep 29 14:23:40 crc kubenswrapper[4869]: I0929 14:23:40.045580 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" event={"ID":"a4536f8d-b2bb-47d1-ac50-093a51f12f65","Type":"ContainerStarted","Data":"985e053d4e2c0c6916ef895106de7aff07f015f19f6e0365bf5997668ed42dfc"} Sep 29 14:23:40 crc kubenswrapper[4869]: I0929 14:23:40.046003 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" event={"ID":"a4536f8d-b2bb-47d1-ac50-093a51f12f65","Type":"ContainerStarted","Data":"32f6dca454df24686c75442f78a830ee6593ebb053499cfdcd5e691d59be02e0"} Sep 29 14:23:40 crc kubenswrapper[4869]: I0929 14:23:40.071974 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" podStartSLOduration=1.6145212660000001 podStartE2EDuration="2.071954981s" podCreationTimestamp="2025-09-29 14:23:38 +0000 UTC" firstStartedPulling="2025-09-29 14:23:39.075770832 +0000 UTC m=+2545.516415152" lastFinishedPulling="2025-09-29 14:23:39.533204547 +0000 UTC m=+2545.973848867" observedRunningTime="2025-09-29 14:23:40.061627683 +0000 UTC m=+2546.502272013" watchObservedRunningTime="2025-09-29 14:23:40.071954981 +0000 UTC m=+2546.512599301" Sep 29 14:23:41 crc kubenswrapper[4869]: I0929 14:23:41.242035 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:23:41 crc kubenswrapper[4869]: E0929 14:23:41.242566 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:23:46 crc kubenswrapper[4869]: I0929 14:23:46.103401 4869 generic.go:334] "Generic (PLEG): container finished" podID="a4536f8d-b2bb-47d1-ac50-093a51f12f65" containerID="985e053d4e2c0c6916ef895106de7aff07f015f19f6e0365bf5997668ed42dfc" exitCode=0 Sep 29 14:23:46 crc kubenswrapper[4869]: I0929 14:23:46.103468 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" event={"ID":"a4536f8d-b2bb-47d1-ac50-093a51f12f65","Type":"ContainerDied","Data":"985e053d4e2c0c6916ef895106de7aff07f015f19f6e0365bf5997668ed42dfc"} Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.648596 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.757272 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd9t7\" (UniqueName: \"kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7\") pod \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.758326 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key\") pod \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.758504 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory\") pod \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.758689 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph\") pod \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\" (UID: \"a4536f8d-b2bb-47d1-ac50-093a51f12f65\") " Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.763592 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7" (OuterVolumeSpecName: "kube-api-access-sd9t7") pod "a4536f8d-b2bb-47d1-ac50-093a51f12f65" (UID: "a4536f8d-b2bb-47d1-ac50-093a51f12f65"). InnerVolumeSpecName "kube-api-access-sd9t7". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.780710 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph" (OuterVolumeSpecName: "ceph") pod "a4536f8d-b2bb-47d1-ac50-093a51f12f65" (UID: "a4536f8d-b2bb-47d1-ac50-093a51f12f65"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.800968 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory" (OuterVolumeSpecName: "inventory") pod "a4536f8d-b2bb-47d1-ac50-093a51f12f65" (UID: "a4536f8d-b2bb-47d1-ac50-093a51f12f65"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.803000 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a4536f8d-b2bb-47d1-ac50-093a51f12f65" (UID: "a4536f8d-b2bb-47d1-ac50-093a51f12f65"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.861584 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd9t7\" (UniqueName: \"kubernetes.io/projected/a4536f8d-b2bb-47d1-ac50-093a51f12f65-kube-api-access-sd9t7\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.861849 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.861955 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:47 crc kubenswrapper[4869]: I0929 14:23:47.862013 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4536f8d-b2bb-47d1-ac50-093a51f12f65-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.127750 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" event={"ID":"a4536f8d-b2bb-47d1-ac50-093a51f12f65","Type":"ContainerDied","Data":"32f6dca454df24686c75442f78a830ee6593ebb053499cfdcd5e691d59be02e0"} Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.127816 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32f6dca454df24686c75442f78a830ee6593ebb053499cfdcd5e691d59be02e0" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.128142 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.219877 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68"] Sep 29 14:23:48 crc kubenswrapper[4869]: E0929 14:23:48.220439 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4536f8d-b2bb-47d1-ac50-093a51f12f65" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.220466 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4536f8d-b2bb-47d1-ac50-093a51f12f65" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.220714 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4536f8d-b2bb-47d1-ac50-093a51f12f65" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.221848 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239107 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239154 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239258 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68"] Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239511 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239876 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.239942 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.240077 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.371946 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.372004 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.372244 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql8l5\" (UniqueName: \"kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.372310 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.372760 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 
crc kubenswrapper[4869]: I0929 14:23:48.372913 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.475761 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.475825 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.475907 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql8l5\" (UniqueName: \"kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.475931 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.476011 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.476043 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.476962 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.483180 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.483253 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.484978 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.488097 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.494521 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql8l5\" (UniqueName: \"kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hgg68\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:48 crc kubenswrapper[4869]: I0929 14:23:48.541381 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:23:49 crc kubenswrapper[4869]: I0929 14:23:49.097189 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68"] Sep 29 14:23:49 crc kubenswrapper[4869]: I0929 14:23:49.137049 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" event={"ID":"694027a0-4ac3-49a7-ab93-7022d098b091","Type":"ContainerStarted","Data":"6542d74690785869520c981147f35d5894bf7eed8b4c80aa0234547729f110fe"} Sep 29 14:23:51 crc kubenswrapper[4869]: I0929 14:23:51.154685 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" event={"ID":"694027a0-4ac3-49a7-ab93-7022d098b091","Type":"ContainerStarted","Data":"b588e239c19a1c1a32c1dc92958f629e7dc9300f7663f27267ed93ee265b36da"} Sep 29 14:23:51 crc kubenswrapper[4869]: I0929 14:23:51.173711 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" podStartSLOduration=1.92030703 podStartE2EDuration="3.173682959s" podCreationTimestamp="2025-09-29 14:23:48 +0000 UTC" firstStartedPulling="2025-09-29 14:23:49.098344679 +0000 UTC m=+2555.538988999" lastFinishedPulling="2025-09-29 14:23:50.351720608 +0000 UTC m=+2556.792364928" observedRunningTime="2025-09-29 14:23:51.173111794 +0000 UTC m=+2557.613756114" watchObservedRunningTime="2025-09-29 14:23:51.173682959 +0000 UTC m=+2557.614327279" Sep 29 14:23:52 crc kubenswrapper[4869]: I0929 14:23:52.247194 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:23:52 crc kubenswrapper[4869]: E0929 14:23:52.247981 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:24:04 crc kubenswrapper[4869]: I0929 14:24:04.252853 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:24:04 crc kubenswrapper[4869]: E0929 14:24:04.256060 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:24:19 crc kubenswrapper[4869]: I0929 14:24:19.242119 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:24:19 crc kubenswrapper[4869]: E0929 14:24:19.243325 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" 
podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:24:30 crc kubenswrapper[4869]: I0929 14:24:30.242226 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:24:30 crc kubenswrapper[4869]: I0929 14:24:30.627565 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973"} Sep 29 14:25:04 crc kubenswrapper[4869]: I0929 14:25:04.948857 4869 generic.go:334] "Generic (PLEG): container finished" podID="694027a0-4ac3-49a7-ab93-7022d098b091" containerID="b588e239c19a1c1a32c1dc92958f629e7dc9300f7663f27267ed93ee265b36da" exitCode=0 Sep 29 14:25:04 crc kubenswrapper[4869]: I0929 14:25:04.948948 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" event={"ID":"694027a0-4ac3-49a7-ab93-7022d098b091","Type":"ContainerDied","Data":"b588e239c19a1c1a32c1dc92958f629e7dc9300f7663f27267ed93ee265b36da"} Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.478876 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.643789 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql8l5\" (UniqueName: \"kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.643846 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.643871 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.643909 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.643944 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.644014 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory\") pod \"694027a0-4ac3-49a7-ab93-7022d098b091\" (UID: \"694027a0-4ac3-49a7-ab93-7022d098b091\") " Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.652906 4869 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph" (OuterVolumeSpecName: "ceph") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.653013 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.653084 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5" (OuterVolumeSpecName: "kube-api-access-ql8l5") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "kube-api-access-ql8l5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.674276 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.677953 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.692745 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory" (OuterVolumeSpecName: "inventory") pod "694027a0-4ac3-49a7-ab93-7022d098b091" (UID: "694027a0-4ac3-49a7-ab93-7022d098b091"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746601 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746654 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql8l5\" (UniqueName: \"kubernetes.io/projected/694027a0-4ac3-49a7-ab93-7022d098b091-kube-api-access-ql8l5\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746668 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746677 4869 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746687 4869 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/694027a0-4ac3-49a7-ab93-7022d098b091-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.746697 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/694027a0-4ac3-49a7-ab93-7022d098b091-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.972375 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" event={"ID":"694027a0-4ac3-49a7-ab93-7022d098b091","Type":"ContainerDied","Data":"6542d74690785869520c981147f35d5894bf7eed8b4c80aa0234547729f110fe"} Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.972432 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6542d74690785869520c981147f35d5894bf7eed8b4c80aa0234547729f110fe" Sep 29 14:25:06 crc kubenswrapper[4869]: I0929 14:25:06.972452 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hgg68" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.079056 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f"] Sep 29 14:25:07 crc kubenswrapper[4869]: E0929 14:25:07.079557 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="694027a0-4ac3-49a7-ab93-7022d098b091" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.079577 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="694027a0-4ac3-49a7-ab93-7022d098b091" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.079784 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="694027a0-4ac3-49a7-ab93-7022d098b091" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.080512 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.085799 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.085956 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.085977 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.086133 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.086361 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.086504 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.087776 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.103094 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f"] Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257391 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257456 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257544 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257725 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnwjj\" (UniqueName: \"kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257888 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257961 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.257989 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.360520 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361035 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361096 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnwjj\" (UniqueName: \"kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361177 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361209 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: 
\"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361228 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.361300 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.370326 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.372447 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.374361 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.376304 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.379219 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.380406 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.385204 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnwjj\" (UniqueName: \"kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:07 crc kubenswrapper[4869]: I0929 14:25:07.408115 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:25:08 crc kubenswrapper[4869]: I0929 14:25:08.017023 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f"] Sep 29 14:25:09 crc kubenswrapper[4869]: I0929 14:25:09.004876 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" event={"ID":"9edd7e05-abde-4406-af59-37f1ea0e8b73","Type":"ContainerStarted","Data":"099ac0f2f67f6d66e8869c70ed4e1fbcf1633b7810e20df19dc7bdf046db8fea"} Sep 29 14:25:10 crc kubenswrapper[4869]: I0929 14:25:10.018751 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" event={"ID":"9edd7e05-abde-4406-af59-37f1ea0e8b73","Type":"ContainerStarted","Data":"37501be49935839de438f5d71fe9518f6c9c4ceaaca6cadd7c88cd55763171e2"} Sep 29 14:25:10 crc kubenswrapper[4869]: I0929 14:25:10.059228 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" podStartSLOduration=2.274452522 podStartE2EDuration="3.059196234s" podCreationTimestamp="2025-09-29 14:25:07 +0000 UTC" firstStartedPulling="2025-09-29 14:25:08.03114056 +0000 UTC m=+2634.471784880" lastFinishedPulling="2025-09-29 14:25:08.815884272 +0000 UTC m=+2635.256528592" observedRunningTime="2025-09-29 14:25:10.038475067 +0000 UTC m=+2636.479119427" watchObservedRunningTime="2025-09-29 14:25:10.059196234 +0000 UTC m=+2636.499840554" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.660675 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.665480 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.674653 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.726529 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.726596 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8kb9\" (UniqueName: \"kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.726930 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.828925 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.829631 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.829831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.829968 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8kb9\" (UniqueName: \"kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.830161 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.856585 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m8kb9\" (UniqueName: \"kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9\") pod \"community-operators-njxcb\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:45 crc kubenswrapper[4869]: I0929 14:25:45.992892 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:46 crc kubenswrapper[4869]: I0929 14:25:46.577060 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:25:47 crc kubenswrapper[4869]: I0929 14:25:47.417785 4869 generic.go:334] "Generic (PLEG): container finished" podID="586a4722-af81-4bc1-a664-e45109b492f4" containerID="ee34e87fa1f211b23936f2efa84f3e191ac9517c3d21b7ca9877f3448f758edf" exitCode=0 Sep 29 14:25:47 crc kubenswrapper[4869]: I0929 14:25:47.417878 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerDied","Data":"ee34e87fa1f211b23936f2efa84f3e191ac9517c3d21b7ca9877f3448f758edf"} Sep 29 14:25:47 crc kubenswrapper[4869]: I0929 14:25:47.418236 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerStarted","Data":"6aba4c74743af083dfbae509963f8cc64634a730f83f0e3f543c097f06848f11"} Sep 29 14:25:47 crc kubenswrapper[4869]: I0929 14:25:47.420321 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:25:50 crc kubenswrapper[4869]: I0929 14:25:50.448366 4869 generic.go:334] "Generic (PLEG): container finished" podID="586a4722-af81-4bc1-a664-e45109b492f4" containerID="c546edfd22475899530e0bd046d66d52b69812eebc5a5e2cb5f9c253b449cc45" exitCode=0 Sep 29 14:25:50 crc kubenswrapper[4869]: I0929 14:25:50.448655 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerDied","Data":"c546edfd22475899530e0bd046d66d52b69812eebc5a5e2cb5f9c253b449cc45"} Sep 29 14:25:52 crc kubenswrapper[4869]: I0929 14:25:52.484250 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerStarted","Data":"c2ef10d77e49cea09637b70912392c6e3420973e50d403fda7e0d5248c37639e"} Sep 29 14:25:52 crc kubenswrapper[4869]: I0929 14:25:52.505911 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-njxcb" podStartSLOduration=3.639219292 podStartE2EDuration="7.5058881s" podCreationTimestamp="2025-09-29 14:25:45 +0000 UTC" firstStartedPulling="2025-09-29 14:25:47.420025724 +0000 UTC m=+2673.860670044" lastFinishedPulling="2025-09-29 14:25:51.286694532 +0000 UTC m=+2677.727338852" observedRunningTime="2025-09-29 14:25:52.50126087 +0000 UTC m=+2678.941905190" watchObservedRunningTime="2025-09-29 14:25:52.5058881 +0000 UTC m=+2678.946532420" Sep 29 14:25:55 crc kubenswrapper[4869]: I0929 14:25:55.993076 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:55 crc kubenswrapper[4869]: I0929 14:25:55.994767 4869 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:56 crc kubenswrapper[4869]: I0929 14:25:56.044029 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:56 crc kubenswrapper[4869]: I0929 14:25:56.568185 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:56 crc kubenswrapper[4869]: I0929 14:25:56.616256 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:25:58 crc kubenswrapper[4869]: I0929 14:25:58.539746 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-njxcb" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="registry-server" containerID="cri-o://c2ef10d77e49cea09637b70912392c6e3420973e50d403fda7e0d5248c37639e" gracePeriod=2 Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.551689 4869 generic.go:334] "Generic (PLEG): container finished" podID="586a4722-af81-4bc1-a664-e45109b492f4" containerID="c2ef10d77e49cea09637b70912392c6e3420973e50d403fda7e0d5248c37639e" exitCode=0 Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.551834 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerDied","Data":"c2ef10d77e49cea09637b70912392c6e3420973e50d403fda7e0d5248c37639e"} Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.552217 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-njxcb" event={"ID":"586a4722-af81-4bc1-a664-e45109b492f4","Type":"ContainerDied","Data":"6aba4c74743af083dfbae509963f8cc64634a730f83f0e3f543c097f06848f11"} Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.552249 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6aba4c74743af083dfbae509963f8cc64634a730f83f0e3f543c097f06848f11" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.603848 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.742999 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8kb9\" (UniqueName: \"kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9\") pod \"586a4722-af81-4bc1-a664-e45109b492f4\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.743220 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities\") pod \"586a4722-af81-4bc1-a664-e45109b492f4\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.743473 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content\") pod \"586a4722-af81-4bc1-a664-e45109b492f4\" (UID: \"586a4722-af81-4bc1-a664-e45109b492f4\") " Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.744225 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities" (OuterVolumeSpecName: "utilities") pod "586a4722-af81-4bc1-a664-e45109b492f4" (UID: "586a4722-af81-4bc1-a664-e45109b492f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.751073 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9" (OuterVolumeSpecName: "kube-api-access-m8kb9") pod "586a4722-af81-4bc1-a664-e45109b492f4" (UID: "586a4722-af81-4bc1-a664-e45109b492f4"). InnerVolumeSpecName "kube-api-access-m8kb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.797100 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "586a4722-af81-4bc1-a664-e45109b492f4" (UID: "586a4722-af81-4bc1-a664-e45109b492f4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.846426 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8kb9\" (UniqueName: \"kubernetes.io/projected/586a4722-af81-4bc1-a664-e45109b492f4-kube-api-access-m8kb9\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.846461 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:25:59 crc kubenswrapper[4869]: I0929 14:25:59.846470 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/586a4722-af81-4bc1-a664-e45109b492f4-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:00 crc kubenswrapper[4869]: I0929 14:26:00.573352 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-njxcb" Sep 29 14:26:00 crc kubenswrapper[4869]: I0929 14:26:00.612317 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:26:00 crc kubenswrapper[4869]: I0929 14:26:00.623558 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-njxcb"] Sep 29 14:26:02 crc kubenswrapper[4869]: I0929 14:26:02.258386 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="586a4722-af81-4bc1-a664-e45109b492f4" path="/var/lib/kubelet/pods/586a4722-af81-4bc1-a664-e45109b492f4/volumes" Sep 29 14:26:42 crc kubenswrapper[4869]: I0929 14:26:42.056422 4869 generic.go:334] "Generic (PLEG): container finished" podID="9edd7e05-abde-4406-af59-37f1ea0e8b73" containerID="37501be49935839de438f5d71fe9518f6c9c4ceaaca6cadd7c88cd55763171e2" exitCode=0 Sep 29 14:26:42 crc kubenswrapper[4869]: I0929 14:26:42.056537 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" event={"ID":"9edd7e05-abde-4406-af59-37f1ea0e8b73","Type":"ContainerDied","Data":"37501be49935839de438f5d71fe9518f6c9c4ceaaca6cadd7c88cd55763171e2"} Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.531753 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.605728 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.605832 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.605853 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.605894 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnwjj\" (UniqueName: \"kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.605976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.606190 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.606269 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0\") pod \"9edd7e05-abde-4406-af59-37f1ea0e8b73\" (UID: \"9edd7e05-abde-4406-af59-37f1ea0e8b73\") " Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.613464 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph" (OuterVolumeSpecName: "ceph") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.614292 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj" (OuterVolumeSpecName: "kube-api-access-xnwjj") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "kube-api-access-xnwjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.615356 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.640593 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory" (OuterVolumeSpecName: "inventory") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.641338 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.641874 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.645114 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "9edd7e05-abde-4406-af59-37f1ea0e8b73" (UID: "9edd7e05-abde-4406-af59-37f1ea0e8b73"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709480 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709746 4869 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709768 4869 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709782 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709796 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709807 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnwjj\" (UniqueName: \"kubernetes.io/projected/9edd7e05-abde-4406-af59-37f1ea0e8b73-kube-api-access-xnwjj\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:43 crc kubenswrapper[4869]: I0929 14:26:43.709820 4869 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9edd7e05-abde-4406-af59-37f1ea0e8b73-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.077077 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" event={"ID":"9edd7e05-abde-4406-af59-37f1ea0e8b73","Type":"ContainerDied","Data":"099ac0f2f67f6d66e8869c70ed4e1fbcf1633b7810e20df19dc7bdf046db8fea"} Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.077136 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="099ac0f2f67f6d66e8869c70ed4e1fbcf1633b7810e20df19dc7bdf046db8fea" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.077248 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.197540 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm"] Sep 29 14:26:44 crc kubenswrapper[4869]: E0929 14:26:44.198004 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="registry-server" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198024 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="registry-server" Sep 29 14:26:44 crc kubenswrapper[4869]: E0929 14:26:44.198032 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9edd7e05-abde-4406-af59-37f1ea0e8b73" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198042 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9edd7e05-abde-4406-af59-37f1ea0e8b73" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Sep 29 14:26:44 crc kubenswrapper[4869]: E0929 14:26:44.198078 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="extract-content" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198086 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="extract-content" Sep 29 14:26:44 crc kubenswrapper[4869]: E0929 14:26:44.198100 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="extract-utilities" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198107 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="extract-utilities" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198303 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9edd7e05-abde-4406-af59-37f1ea0e8b73" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.198321 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="586a4722-af81-4bc1-a664-e45109b492f4" containerName="registry-server" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.199214 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.201593 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.203666 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.203810 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.204257 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.204418 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.207057 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.215061 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm"] Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.329300 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwtv4\" (UniqueName: \"kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.329818 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.329890 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.330338 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.330396 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " 
pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.330714 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.432671 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.432806 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwtv4\" (UniqueName: \"kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.432862 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.432898 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.432982 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.433008 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.437465 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.437479 4869 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.438897 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.439336 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.441214 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.453507 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwtv4\" (UniqueName: \"kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:44 crc kubenswrapper[4869]: I0929 14:26:44.549569 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:26:45 crc kubenswrapper[4869]: I0929 14:26:45.098893 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm"] Sep 29 14:26:46 crc kubenswrapper[4869]: I0929 14:26:46.106216 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" event={"ID":"03e348b9-33ba-41f9-ac42-792fd12e4e7c","Type":"ContainerStarted","Data":"81b2a15215a3597e94eaa832ec8a17f9fa72b48bc5c1199e5a2c65ef8309e1f0"} Sep 29 14:26:46 crc kubenswrapper[4869]: I0929 14:26:46.106734 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" event={"ID":"03e348b9-33ba-41f9-ac42-792fd12e4e7c","Type":"ContainerStarted","Data":"68656475f121f660184de04a75d916b866458abeb44bd23db1d18f324f6880bf"} Sep 29 14:26:46 crc kubenswrapper[4869]: I0929 14:26:46.136300 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" podStartSLOduration=1.6357069 podStartE2EDuration="2.136270849s" podCreationTimestamp="2025-09-29 14:26:44 +0000 UTC" firstStartedPulling="2025-09-29 14:26:45.107914176 +0000 UTC m=+2731.548558496" lastFinishedPulling="2025-09-29 14:26:45.608478125 +0000 UTC m=+2732.049122445" observedRunningTime="2025-09-29 14:26:46.122908093 +0000 UTC m=+2732.563552433" watchObservedRunningTime="2025-09-29 14:26:46.136270849 +0000 UTC m=+2732.576915189" Sep 29 14:26:50 crc kubenswrapper[4869]: I0929 14:26:50.656691 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:26:50 crc kubenswrapper[4869]: I0929 14:26:50.657551 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.316662 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.320210 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.321239 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.397863 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtpqg\" (UniqueName: \"kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.397970 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.398175 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.500444 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.500549 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtpqg\" (UniqueName: \"kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.500629 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.501144 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.501393 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.541979 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wtpqg\" (UniqueName: \"kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg\") pod \"redhat-marketplace-nxpt8\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:58 crc kubenswrapper[4869]: I0929 14:26:58.649463 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:26:59 crc kubenswrapper[4869]: I0929 14:26:59.213267 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:26:59 crc kubenswrapper[4869]: W0929 14:26:59.216165 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0560d2c_a29b_4bc1_8599_fc5ec30d849a.slice/crio-f0e2a27cd43f0e2ec06fd51a6f6500f98b89bf5ac35eb2738b89a493b5e26551 WatchSource:0}: Error finding container f0e2a27cd43f0e2ec06fd51a6f6500f98b89bf5ac35eb2738b89a493b5e26551: Status 404 returned error can't find the container with id f0e2a27cd43f0e2ec06fd51a6f6500f98b89bf5ac35eb2738b89a493b5e26551 Sep 29 14:26:59 crc kubenswrapper[4869]: I0929 14:26:59.237928 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerStarted","Data":"f0e2a27cd43f0e2ec06fd51a6f6500f98b89bf5ac35eb2738b89a493b5e26551"} Sep 29 14:27:00 crc kubenswrapper[4869]: I0929 14:27:00.253654 4869 generic.go:334] "Generic (PLEG): container finished" podID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerID="c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457" exitCode=0 Sep 29 14:27:00 crc kubenswrapper[4869]: I0929 14:27:00.255861 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerDied","Data":"c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457"} Sep 29 14:27:02 crc kubenswrapper[4869]: I0929 14:27:02.279987 4869 generic.go:334] "Generic (PLEG): container finished" podID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerID="01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75" exitCode=0 Sep 29 14:27:02 crc kubenswrapper[4869]: I0929 14:27:02.280002 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerDied","Data":"01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75"} Sep 29 14:27:03 crc kubenswrapper[4869]: I0929 14:27:03.310378 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerStarted","Data":"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc"} Sep 29 14:27:03 crc kubenswrapper[4869]: I0929 14:27:03.331304 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nxpt8" podStartSLOduration=2.863905169 podStartE2EDuration="5.331283364s" podCreationTimestamp="2025-09-29 14:26:58 +0000 UTC" firstStartedPulling="2025-09-29 14:27:00.255808583 +0000 UTC m=+2746.696452893" lastFinishedPulling="2025-09-29 14:27:02.723186758 +0000 UTC m=+2749.163831088" observedRunningTime="2025-09-29 14:27:03.330437802 +0000 UTC m=+2749.771082122" 
watchObservedRunningTime="2025-09-29 14:27:03.331283364 +0000 UTC m=+2749.771927694" Sep 29 14:27:08 crc kubenswrapper[4869]: I0929 14:27:08.649978 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:08 crc kubenswrapper[4869]: I0929 14:27:08.650749 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:08 crc kubenswrapper[4869]: I0929 14:27:08.699913 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:09 crc kubenswrapper[4869]: I0929 14:27:09.444988 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:09 crc kubenswrapper[4869]: I0929 14:27:09.506722 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.407024 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nxpt8" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="registry-server" containerID="cri-o://bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc" gracePeriod=2 Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.943522 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.961953 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtpqg\" (UniqueName: \"kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg\") pod \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.962064 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content\") pod \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.962096 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities\") pod \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\" (UID: \"b0560d2c-a29b-4bc1-8599-fc5ec30d849a\") " Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.963800 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities" (OuterVolumeSpecName: "utilities") pod "b0560d2c-a29b-4bc1-8599-fc5ec30d849a" (UID: "b0560d2c-a29b-4bc1-8599-fc5ec30d849a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.971706 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg" (OuterVolumeSpecName: "kube-api-access-wtpqg") pod "b0560d2c-a29b-4bc1-8599-fc5ec30d849a" (UID: "b0560d2c-a29b-4bc1-8599-fc5ec30d849a"). InnerVolumeSpecName "kube-api-access-wtpqg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:27:11 crc kubenswrapper[4869]: I0929 14:27:11.990109 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0560d2c-a29b-4bc1-8599-fc5ec30d849a" (UID: "b0560d2c-a29b-4bc1-8599-fc5ec30d849a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.064169 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtpqg\" (UniqueName: \"kubernetes.io/projected/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-kube-api-access-wtpqg\") on node \"crc\" DevicePath \"\"" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.064230 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.064246 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0560d2c-a29b-4bc1-8599-fc5ec30d849a-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.419657 4869 generic.go:334] "Generic (PLEG): container finished" podID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerID="bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc" exitCode=0 Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.419717 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nxpt8" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.419725 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerDied","Data":"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc"} Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.419764 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nxpt8" event={"ID":"b0560d2c-a29b-4bc1-8599-fc5ec30d849a","Type":"ContainerDied","Data":"f0e2a27cd43f0e2ec06fd51a6f6500f98b89bf5ac35eb2738b89a493b5e26551"} Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.419785 4869 scope.go:117] "RemoveContainer" containerID="bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.455984 4869 scope.go:117] "RemoveContainer" containerID="01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.456637 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.468904 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nxpt8"] Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.486793 4869 scope.go:117] "RemoveContainer" containerID="c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.532530 4869 scope.go:117] "RemoveContainer" containerID="bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc" Sep 29 14:27:12 crc kubenswrapper[4869]: E0929 14:27:12.533359 4869 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc\": container with ID starting with bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc not found: ID does not exist" containerID="bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.533419 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc"} err="failed to get container status \"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc\": rpc error: code = NotFound desc = could not find container \"bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc\": container with ID starting with bf7ba0af98d8b1a1eb1258e013a55cabd86d1989e3ac60f2ef18a3afec98e7fc not found: ID does not exist" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.533459 4869 scope.go:117] "RemoveContainer" containerID="01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75" Sep 29 14:27:12 crc kubenswrapper[4869]: E0929 14:27:12.534257 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75\": container with ID starting with 01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75 not found: ID does not exist" containerID="01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.534291 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75"} err="failed to get container status \"01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75\": rpc error: code = NotFound desc = could not find container \"01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75\": container with ID starting with 01acda33760839103a79ecaaa2aac0cd241997358c7d7acb9831f7671d8a8a75 not found: ID does not exist" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.534308 4869 scope.go:117] "RemoveContainer" containerID="c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457" Sep 29 14:27:12 crc kubenswrapper[4869]: E0929 14:27:12.534816 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457\": container with ID starting with c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457 not found: ID does not exist" containerID="c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457" Sep 29 14:27:12 crc kubenswrapper[4869]: I0929 14:27:12.534861 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457"} err="failed to get container status \"c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457\": rpc error: code = NotFound desc = could not find container \"c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457\": container with ID starting with c3ea1b1f1b30bf3068da5457f1ebf0a8eb90d80f173159a020e707675c3f6457 not found: ID does not exist" Sep 29 14:27:14 crc kubenswrapper[4869]: I0929 14:27:14.259160 4869 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" path="/var/lib/kubelet/pods/b0560d2c-a29b-4bc1-8599-fc5ec30d849a/volumes" Sep 29 14:27:20 crc kubenswrapper[4869]: I0929 14:27:20.657662 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:27:20 crc kubenswrapper[4869]: I0929 14:27:20.658924 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.657530 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.658595 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.658709 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.659932 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.659987 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973" gracePeriod=600 Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.895954 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973" exitCode=0 Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.896019 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973"} Sep 29 14:27:50 crc kubenswrapper[4869]: I0929 14:27:50.896080 4869 scope.go:117] "RemoveContainer" containerID="7f71c445a3be4708f52e94c2ee7a25cfcc9ea6d346b5137aba037db0bbf0838f" Sep 29 14:27:51 crc kubenswrapper[4869]: I0929 14:27:51.911003 4869 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e"} Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.103312 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:40 crc kubenswrapper[4869]: E0929 14:28:40.104601 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="registry-server" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.104643 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="registry-server" Sep 29 14:28:40 crc kubenswrapper[4869]: E0929 14:28:40.104679 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="extract-content" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.104686 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="extract-content" Sep 29 14:28:40 crc kubenswrapper[4869]: E0929 14:28:40.104724 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="extract-utilities" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.104731 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="extract-utilities" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.104936 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0560d2c-a29b-4bc1-8599-fc5ec30d849a" containerName="registry-server" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.106640 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.128503 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.274568 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbrtz\" (UniqueName: \"kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.275068 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.275396 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.377437 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.377783 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.377972 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbrtz\" (UniqueName: \"kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.378488 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.378801 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.414070 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dbrtz\" (UniqueName: \"kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz\") pod \"redhat-operators-cjv8f\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.430792 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:40 crc kubenswrapper[4869]: I0929 14:28:40.996328 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:41 crc kubenswrapper[4869]: I0929 14:28:41.486447 4869 generic.go:334] "Generic (PLEG): container finished" podID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerID="e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa" exitCode=0 Sep 29 14:28:41 crc kubenswrapper[4869]: I0929 14:28:41.486586 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerDied","Data":"e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa"} Sep 29 14:28:41 crc kubenswrapper[4869]: I0929 14:28:41.486986 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerStarted","Data":"369073e957be34c7391b197b7e6a3e97d57b9521300afd4c0af9c427afef976b"} Sep 29 14:28:42 crc kubenswrapper[4869]: I0929 14:28:42.502134 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerStarted","Data":"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905"} Sep 29 14:28:43 crc kubenswrapper[4869]: I0929 14:28:43.515019 4869 generic.go:334] "Generic (PLEG): container finished" podID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerID="1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905" exitCode=0 Sep 29 14:28:43 crc kubenswrapper[4869]: I0929 14:28:43.515166 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerDied","Data":"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905"} Sep 29 14:28:44 crc kubenswrapper[4869]: I0929 14:28:44.529242 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerStarted","Data":"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9"} Sep 29 14:28:44 crc kubenswrapper[4869]: I0929 14:28:44.557992 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cjv8f" podStartSLOduration=2.060500822 podStartE2EDuration="4.557965508s" podCreationTimestamp="2025-09-29 14:28:40 +0000 UTC" firstStartedPulling="2025-09-29 14:28:41.489683649 +0000 UTC m=+2847.930327969" lastFinishedPulling="2025-09-29 14:28:43.987148335 +0000 UTC m=+2850.427792655" observedRunningTime="2025-09-29 14:28:44.549113416 +0000 UTC m=+2850.989757756" watchObservedRunningTime="2025-09-29 14:28:44.557965508 +0000 UTC m=+2850.998609828" Sep 29 14:28:50 crc kubenswrapper[4869]: I0929 14:28:50.431892 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cjv8f" 
Sep 29 14:28:50 crc kubenswrapper[4869]: I0929 14:28:50.432666 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:50 crc kubenswrapper[4869]: I0929 14:28:50.483804 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:50 crc kubenswrapper[4869]: I0929 14:28:50.636037 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:50 crc kubenswrapper[4869]: I0929 14:28:50.729195 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:52 crc kubenswrapper[4869]: I0929 14:28:52.607194 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cjv8f" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="registry-server" containerID="cri-o://f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9" gracePeriod=2 Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.597654 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.626118 4869 generic.go:334] "Generic (PLEG): container finished" podID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerID="f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9" exitCode=0 Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.626170 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerDied","Data":"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9"} Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.626204 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjv8f" event={"ID":"d61c213d-26bb-4470-87e4-aef57d9c8512","Type":"ContainerDied","Data":"369073e957be34c7391b197b7e6a3e97d57b9521300afd4c0af9c427afef976b"} Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.626224 4869 scope.go:117] "RemoveContainer" containerID="f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.626374 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cjv8f" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.664953 4869 scope.go:117] "RemoveContainer" containerID="1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.687537 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbrtz\" (UniqueName: \"kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz\") pod \"d61c213d-26bb-4470-87e4-aef57d9c8512\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.687758 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities\") pod \"d61c213d-26bb-4470-87e4-aef57d9c8512\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.687832 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content\") pod \"d61c213d-26bb-4470-87e4-aef57d9c8512\" (UID: \"d61c213d-26bb-4470-87e4-aef57d9c8512\") " Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.690390 4869 scope.go:117] "RemoveContainer" containerID="e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.690640 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities" (OuterVolumeSpecName: "utilities") pod "d61c213d-26bb-4470-87e4-aef57d9c8512" (UID: "d61c213d-26bb-4470-87e4-aef57d9c8512"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.695454 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz" (OuterVolumeSpecName: "kube-api-access-dbrtz") pod "d61c213d-26bb-4470-87e4-aef57d9c8512" (UID: "d61c213d-26bb-4470-87e4-aef57d9c8512"). InnerVolumeSpecName "kube-api-access-dbrtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.786041 4869 scope.go:117] "RemoveContainer" containerID="f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9" Sep 29 14:28:53 crc kubenswrapper[4869]: E0929 14:28:53.787010 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9\": container with ID starting with f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9 not found: ID does not exist" containerID="f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.787068 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9"} err="failed to get container status \"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9\": rpc error: code = NotFound desc = could not find container \"f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9\": container with ID starting with f4fd8f1ad0298340e3e7ea7eef5ead0fe96a827b0ba68c8deea3ffc547f44ae9 not found: ID does not exist" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.787182 4869 scope.go:117] "RemoveContainer" containerID="1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905" Sep 29 14:28:53 crc kubenswrapper[4869]: E0929 14:28:53.787541 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905\": container with ID starting with 1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905 not found: ID does not exist" containerID="1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.787653 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905"} err="failed to get container status \"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905\": rpc error: code = NotFound desc = could not find container \"1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905\": container with ID starting with 1c003aa4e4aae67fec72bf1d665c2baea0b001ae08ec121e31555cb5d9165905 not found: ID does not exist" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.787785 4869 scope.go:117] "RemoveContainer" containerID="e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa" Sep 29 14:28:53 crc kubenswrapper[4869]: E0929 14:28:53.788165 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa\": container with ID starting with e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa not found: ID does not exist" containerID="e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.788206 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa"} err="failed to get container status \"e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa\": rpc error: code = NotFound desc = could not 
find container \"e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa\": container with ID starting with e3829ffafbe374a3ec37cbdb9ca62831730a9b74f3c2032135aa8477ed6630fa not found: ID does not exist" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.791088 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbrtz\" (UniqueName: \"kubernetes.io/projected/d61c213d-26bb-4470-87e4-aef57d9c8512-kube-api-access-dbrtz\") on node \"crc\" DevicePath \"\"" Sep 29 14:28:53 crc kubenswrapper[4869]: I0929 14:28:53.791133 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:28:55 crc kubenswrapper[4869]: I0929 14:28:55.255688 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d61c213d-26bb-4470-87e4-aef57d9c8512" (UID: "d61c213d-26bb-4470-87e4-aef57d9c8512"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:28:55 crc kubenswrapper[4869]: I0929 14:28:55.338036 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c213d-26bb-4470-87e4-aef57d9c8512-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:28:55 crc kubenswrapper[4869]: I0929 14:28:55.465384 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:55 crc kubenswrapper[4869]: I0929 14:28:55.475027 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cjv8f"] Sep 29 14:28:56 crc kubenswrapper[4869]: I0929 14:28:56.258375 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" path="/var/lib/kubelet/pods/d61c213d-26bb-4470-87e4-aef57d9c8512/volumes" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.199475 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf"] Sep 29 14:30:00 crc kubenswrapper[4869]: E0929 14:30:00.201575 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="registry-server" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.201602 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="registry-server" Sep 29 14:30:00 crc kubenswrapper[4869]: E0929 14:30:00.201689 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="extract-content" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.201702 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="extract-content" Sep 29 14:30:00 crc kubenswrapper[4869]: E0929 14:30:00.201754 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="extract-utilities" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.201765 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="extract-utilities" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.202321 4869 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="d61c213d-26bb-4470-87e4-aef57d9c8512" containerName="registry-server" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.203968 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.210129 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.210527 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.267738 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf"] Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.352256 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pfhx\" (UniqueName: \"kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.352425 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.352483 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.455083 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pfhx\" (UniqueName: \"kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.455179 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.455228 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 
14:30:00.456428 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.467143 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.476833 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pfhx\" (UniqueName: \"kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx\") pod \"collect-profiles-29319270-mh9wf\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:00 crc kubenswrapper[4869]: I0929 14:30:00.540411 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:01 crc kubenswrapper[4869]: I0929 14:30:01.051086 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf"] Sep 29 14:30:01 crc kubenswrapper[4869]: I0929 14:30:01.349717 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" event={"ID":"ba0e62b3-4e01-4cdc-a093-4f1ea622500b","Type":"ContainerStarted","Data":"956911aebfa0ec896e440466050edfa6f38ebc7ba9209e7d16651403abee6741"} Sep 29 14:30:01 crc kubenswrapper[4869]: I0929 14:30:01.350173 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" event={"ID":"ba0e62b3-4e01-4cdc-a093-4f1ea622500b","Type":"ContainerStarted","Data":"c2320a99e1bc169878e72dda16f5db1e85bddd3f3bee08ce491393cd2707605c"} Sep 29 14:30:01 crc kubenswrapper[4869]: I0929 14:30:01.376592 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" podStartSLOduration=1.376563918 podStartE2EDuration="1.376563918s" podCreationTimestamp="2025-09-29 14:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:30:01.371411233 +0000 UTC m=+2927.812055573" watchObservedRunningTime="2025-09-29 14:30:01.376563918 +0000 UTC m=+2927.817208238" Sep 29 14:30:02 crc kubenswrapper[4869]: I0929 14:30:02.365996 4869 generic.go:334] "Generic (PLEG): container finished" podID="ba0e62b3-4e01-4cdc-a093-4f1ea622500b" containerID="956911aebfa0ec896e440466050edfa6f38ebc7ba9209e7d16651403abee6741" exitCode=0 Sep 29 14:30:02 crc kubenswrapper[4869]: I0929 14:30:02.366052 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" event={"ID":"ba0e62b3-4e01-4cdc-a093-4f1ea622500b","Type":"ContainerDied","Data":"956911aebfa0ec896e440466050edfa6f38ebc7ba9209e7d16651403abee6741"} Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 
14:30:03.732691 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.833780 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume\") pod \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.833891 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pfhx\" (UniqueName: \"kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx\") pod \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.833961 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume\") pod \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\" (UID: \"ba0e62b3-4e01-4cdc-a093-4f1ea622500b\") " Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.835198 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume" (OuterVolumeSpecName: "config-volume") pod "ba0e62b3-4e01-4cdc-a093-4f1ea622500b" (UID: "ba0e62b3-4e01-4cdc-a093-4f1ea622500b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.840968 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ba0e62b3-4e01-4cdc-a093-4f1ea622500b" (UID: "ba0e62b3-4e01-4cdc-a093-4f1ea622500b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.841034 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx" (OuterVolumeSpecName: "kube-api-access-4pfhx") pod "ba0e62b3-4e01-4cdc-a093-4f1ea622500b" (UID: "ba0e62b3-4e01-4cdc-a093-4f1ea622500b"). InnerVolumeSpecName "kube-api-access-4pfhx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.936846 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.937281 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pfhx\" (UniqueName: \"kubernetes.io/projected/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-kube-api-access-4pfhx\") on node \"crc\" DevicePath \"\"" Sep 29 14:30:03 crc kubenswrapper[4869]: I0929 14:30:03.937300 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ba0e62b3-4e01-4cdc-a093-4f1ea622500b-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 14:30:04 crc kubenswrapper[4869]: I0929 14:30:04.389180 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" event={"ID":"ba0e62b3-4e01-4cdc-a093-4f1ea622500b","Type":"ContainerDied","Data":"c2320a99e1bc169878e72dda16f5db1e85bddd3f3bee08ce491393cd2707605c"} Sep 29 14:30:04 crc kubenswrapper[4869]: I0929 14:30:04.389238 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2320a99e1bc169878e72dda16f5db1e85bddd3f3bee08ce491393cd2707605c" Sep 29 14:30:04 crc kubenswrapper[4869]: I0929 14:30:04.389268 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf" Sep 29 14:30:04 crc kubenswrapper[4869]: I0929 14:30:04.467333 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g"] Sep 29 14:30:04 crc kubenswrapper[4869]: I0929 14:30:04.479504 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319225-g8k2g"] Sep 29 14:30:06 crc kubenswrapper[4869]: I0929 14:30:06.257174 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="103e108b-41cc-4b2b-a550-55b3541e1614" path="/var/lib/kubelet/pods/103e108b-41cc-4b2b-a550-55b3541e1614/volumes" Sep 29 14:30:20 crc kubenswrapper[4869]: I0929 14:30:20.656961 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:30:20 crc kubenswrapper[4869]: I0929 14:30:20.658757 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:30:23 crc kubenswrapper[4869]: I0929 14:30:23.603226 4869 scope.go:117] "RemoveContainer" containerID="fff2e67cb14f9c979bfeb1d00a517d02b3a67e402db23f9519c96fe6b36268fc" Sep 29 14:30:50 crc kubenswrapper[4869]: I0929 14:30:50.657412 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Sep 29 14:30:50 crc kubenswrapper[4869]: I0929 14:30:50.658463 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:31:19 crc kubenswrapper[4869]: I0929 14:31:19.167731 4869 generic.go:334] "Generic (PLEG): container finished" podID="03e348b9-33ba-41f9-ac42-792fd12e4e7c" containerID="81b2a15215a3597e94eaa832ec8a17f9fa72b48bc5c1199e5a2c65ef8309e1f0" exitCode=0 Sep 29 14:31:19 crc kubenswrapper[4869]: I0929 14:31:19.167806 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" event={"ID":"03e348b9-33ba-41f9-ac42-792fd12e4e7c","Type":"ContainerDied","Data":"81b2a15215a3597e94eaa832ec8a17f9fa72b48bc5c1199e5a2c65ef8309e1f0"} Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.605534 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.696536 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.697132 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.697206 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.698778 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.699097 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" gracePeriod=600 Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799343 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwtv4\" (UniqueName: \"kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799466 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799558 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799636 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799752 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.799814 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory\") pod \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\" (UID: \"03e348b9-33ba-41f9-ac42-792fd12e4e7c\") " Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.809272 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph" (OuterVolumeSpecName: "ceph") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.810221 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4" (OuterVolumeSpecName: "kube-api-access-pwtv4") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "kube-api-access-pwtv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.810748 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: E0929 14:31:20.828036 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.841200 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory" (OuterVolumeSpecName: "inventory") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.841998 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.846887 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "03e348b9-33ba-41f9-ac42-792fd12e4e7c" (UID: "03e348b9-33ba-41f9-ac42-792fd12e4e7c"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902656 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwtv4\" (UniqueName: \"kubernetes.io/projected/03e348b9-33ba-41f9-ac42-792fd12e4e7c-kube-api-access-pwtv4\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902693 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902706 4869 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902720 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902731 4869 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:20 crc kubenswrapper[4869]: I0929 14:31:20.902742 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03e348b9-33ba-41f9-ac42-792fd12e4e7c-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.200041 4869 generic.go:334] "Generic (PLEG): container finished" 
podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" exitCode=0 Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.200154 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e"} Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.200273 4869 scope.go:117] "RemoveContainer" containerID="7e69c5c58d34e2db58fa3aef87c57c6d11c45ddd8cfa8a8dcc0f83dbfcc86973" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.201375 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:31:21 crc kubenswrapper[4869]: E0929 14:31:21.202087 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.202706 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" event={"ID":"03e348b9-33ba-41f9-ac42-792fd12e4e7c","Type":"ContainerDied","Data":"68656475f121f660184de04a75d916b866458abeb44bd23db1d18f324f6880bf"} Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.202755 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.202759 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68656475f121f660184de04a75d916b866458abeb44bd23db1d18f324f6880bf" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.364280 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r"] Sep 29 14:31:21 crc kubenswrapper[4869]: E0929 14:31:21.365382 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba0e62b3-4e01-4cdc-a093-4f1ea622500b" containerName="collect-profiles" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.365404 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba0e62b3-4e01-4cdc-a093-4f1ea622500b" containerName="collect-profiles" Sep 29 14:31:21 crc kubenswrapper[4869]: E0929 14:31:21.365430 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03e348b9-33ba-41f9-ac42-792fd12e4e7c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.365439 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="03e348b9-33ba-41f9-ac42-792fd12e4e7c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.365642 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba0e62b3-4e01-4cdc-a093-4f1ea622500b" containerName="collect-profiles" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.365661 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="03e348b9-33ba-41f9-ac42-792fd12e4e7c" 
containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.366510 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.369395 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.371563 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.371652 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.371795 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.372047 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.372092 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.372208 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.372272 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.372341 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.380661 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r"] Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.514499 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.514565 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdp9m\" (UniqueName: \"kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.514594 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.514705 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.515550 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.515618 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.515666 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.515724 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.515786 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.516473 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.516599 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.618875 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.618962 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619019 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619084 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619125 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619153 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619193 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619234 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-sdp9m\" (UniqueName: \"kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619269 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619332 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.619368 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.620598 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.622298 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.624558 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.624573 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.625086 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.626285 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.626571 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.626963 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.626963 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.627652 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.639646 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdp9m\" (UniqueName: \"kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:21 crc kubenswrapper[4869]: I0929 14:31:21.703388 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:31:22 crc kubenswrapper[4869]: I0929 14:31:22.671745 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r"] Sep 29 14:31:22 crc kubenswrapper[4869]: W0929 14:31:22.676714 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a0f54ec_6749_43a1_81d5_064a45a1d715.slice/crio-0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183 WatchSource:0}: Error finding container 0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183: Status 404 returned error can't find the container with id 0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183 Sep 29 14:31:22 crc kubenswrapper[4869]: I0929 14:31:22.682279 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:31:23 crc kubenswrapper[4869]: I0929 14:31:23.228984 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" event={"ID":"2a0f54ec-6749-43a1-81d5-064a45a1d715","Type":"ContainerStarted","Data":"0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183"} Sep 29 14:31:24 crc kubenswrapper[4869]: I0929 14:31:24.261327 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" event={"ID":"2a0f54ec-6749-43a1-81d5-064a45a1d715","Type":"ContainerStarted","Data":"b6628f8193dd1c7e66833ee1ccbae48a2b3e6f89eaa0d129ff95104175ef07be"} Sep 29 14:31:24 crc kubenswrapper[4869]: I0929 14:31:24.316815 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" podStartSLOduration=2.8248004939999998 podStartE2EDuration="3.316787057s" podCreationTimestamp="2025-09-29 14:31:21 +0000 UTC" firstStartedPulling="2025-09-29 14:31:22.681967627 +0000 UTC m=+3009.122611947" lastFinishedPulling="2025-09-29 14:31:23.17395419 +0000 UTC m=+3009.614598510" observedRunningTime="2025-09-29 14:31:24.312462754 +0000 UTC m=+3010.753107094" watchObservedRunningTime="2025-09-29 14:31:24.316787057 +0000 UTC m=+3010.757431407" Sep 29 14:31:36 crc kubenswrapper[4869]: I0929 14:31:36.242691 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:31:36 crc kubenswrapper[4869]: E0929 14:31:36.243802 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:31:48 crc kubenswrapper[4869]: I0929 14:31:48.241861 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:31:48 crc kubenswrapper[4869]: E0929 14:31:48.242928 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:32:02 crc kubenswrapper[4869]: I0929 14:32:02.242696 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:32:02 crc kubenswrapper[4869]: E0929 14:32:02.243792 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:32:14 crc kubenswrapper[4869]: I0929 14:32:14.252595 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:32:14 crc kubenswrapper[4869]: E0929 14:32:14.253644 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:32:23 crc kubenswrapper[4869]: I0929 14:32:23.721366 4869 scope.go:117] "RemoveContainer" containerID="ee34e87fa1f211b23936f2efa84f3e191ac9517c3d21b7ca9877f3448f758edf" Sep 29 14:32:23 crc kubenswrapper[4869]: I0929 14:32:23.763392 4869 scope.go:117] "RemoveContainer" containerID="c2ef10d77e49cea09637b70912392c6e3420973e50d403fda7e0d5248c37639e" Sep 29 14:32:23 crc kubenswrapper[4869]: I0929 14:32:23.804443 4869 scope.go:117] "RemoveContainer" containerID="c546edfd22475899530e0bd046d66d52b69812eebc5a5e2cb5f9c253b449cc45" Sep 29 14:32:26 crc kubenswrapper[4869]: I0929 14:32:26.242335 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:32:26 crc kubenswrapper[4869]: E0929 14:32:26.245079 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:32:31 crc kubenswrapper[4869]: I0929 14:32:31.908550 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:31 crc kubenswrapper[4869]: I0929 14:32:31.911662 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:31 crc kubenswrapper[4869]: I0929 14:32:31.934903 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.024337 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.024433 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.024465 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b9lj\" (UniqueName: \"kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.127414 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.127494 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.127521 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b9lj\" (UniqueName: \"kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.128079 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.128194 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.155538 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2b9lj\" (UniqueName: \"kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj\") pod \"certified-operators-v2pg2\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.248226 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.843819 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:32 crc kubenswrapper[4869]: I0929 14:32:32.982519 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerStarted","Data":"a8ace96e6910c57ecf428f2023bf6bcdd507f9c323cb065b07091304b037734c"} Sep 29 14:32:33 crc kubenswrapper[4869]: I0929 14:32:33.996227 4869 generic.go:334] "Generic (PLEG): container finished" podID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerID="bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042" exitCode=0 Sep 29 14:32:33 crc kubenswrapper[4869]: I0929 14:32:33.996306 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerDied","Data":"bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042"} Sep 29 14:32:36 crc kubenswrapper[4869]: I0929 14:32:36.021316 4869 generic.go:334] "Generic (PLEG): container finished" podID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerID="d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c" exitCode=0 Sep 29 14:32:36 crc kubenswrapper[4869]: I0929 14:32:36.021359 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerDied","Data":"d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c"} Sep 29 14:32:37 crc kubenswrapper[4869]: I0929 14:32:37.034665 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerStarted","Data":"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee"} Sep 29 14:32:37 crc kubenswrapper[4869]: I0929 14:32:37.065331 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v2pg2" podStartSLOduration=3.515146948 podStartE2EDuration="6.0653042s" podCreationTimestamp="2025-09-29 14:32:31 +0000 UTC" firstStartedPulling="2025-09-29 14:32:33.999045845 +0000 UTC m=+3080.439690165" lastFinishedPulling="2025-09-29 14:32:36.549203097 +0000 UTC m=+3082.989847417" observedRunningTime="2025-09-29 14:32:37.055826242 +0000 UTC m=+3083.496470562" watchObservedRunningTime="2025-09-29 14:32:37.0653042 +0000 UTC m=+3083.505948520" Sep 29 14:32:41 crc kubenswrapper[4869]: I0929 14:32:41.242737 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:32:41 crc kubenswrapper[4869]: E0929 14:32:41.244324 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:32:42 crc kubenswrapper[4869]: I0929 14:32:42.257165 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:42 crc kubenswrapper[4869]: I0929 14:32:42.257230 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:42 crc kubenswrapper[4869]: I0929 14:32:42.302926 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:43 crc kubenswrapper[4869]: I0929 14:32:43.144485 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:43 crc kubenswrapper[4869]: I0929 14:32:43.203511 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.110289 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v2pg2" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="registry-server" containerID="cri-o://71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee" gracePeriod=2 Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.696514 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.846441 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content\") pod \"f672eb9f-1564-4f6d-8b26-f413b85f10da\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.846588 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b9lj\" (UniqueName: \"kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj\") pod \"f672eb9f-1564-4f6d-8b26-f413b85f10da\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.846814 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities\") pod \"f672eb9f-1564-4f6d-8b26-f413b85f10da\" (UID: \"f672eb9f-1564-4f6d-8b26-f413b85f10da\") " Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.848822 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities" (OuterVolumeSpecName: "utilities") pod "f672eb9f-1564-4f6d-8b26-f413b85f10da" (UID: "f672eb9f-1564-4f6d-8b26-f413b85f10da"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.859984 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj" (OuterVolumeSpecName: "kube-api-access-2b9lj") pod "f672eb9f-1564-4f6d-8b26-f413b85f10da" (UID: "f672eb9f-1564-4f6d-8b26-f413b85f10da"). InnerVolumeSpecName "kube-api-access-2b9lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.959129 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:32:45 crc kubenswrapper[4869]: I0929 14:32:45.959210 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b9lj\" (UniqueName: \"kubernetes.io/projected/f672eb9f-1564-4f6d-8b26-f413b85f10da-kube-api-access-2b9lj\") on node \"crc\" DevicePath \"\"" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.126188 4869 generic.go:334] "Generic (PLEG): container finished" podID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerID="71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee" exitCode=0 Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.126254 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerDied","Data":"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee"} Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.126293 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v2pg2" event={"ID":"f672eb9f-1564-4f6d-8b26-f413b85f10da","Type":"ContainerDied","Data":"a8ace96e6910c57ecf428f2023bf6bcdd507f9c323cb065b07091304b037734c"} Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.126317 4869 scope.go:117] "RemoveContainer" containerID="71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.126404 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v2pg2" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.153962 4869 scope.go:117] "RemoveContainer" containerID="d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.182731 4869 scope.go:117] "RemoveContainer" containerID="bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.231897 4869 scope.go:117] "RemoveContainer" containerID="71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee" Sep 29 14:32:46 crc kubenswrapper[4869]: E0929 14:32:46.232542 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee\": container with ID starting with 71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee not found: ID does not exist" containerID="71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.232609 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee"} err="failed to get container status \"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee\": rpc error: code = NotFound desc = could not find container \"71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee\": container with ID starting with 71efbd886e3080cde29256bc077f847d06460596bd040207cd9edbeb515c96ee not found: ID does not exist" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.232665 4869 scope.go:117] "RemoveContainer" containerID="d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c" Sep 29 14:32:46 crc kubenswrapper[4869]: E0929 14:32:46.233175 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c\": container with ID starting with d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c not found: ID does not exist" containerID="d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.233208 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c"} err="failed to get container status \"d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c\": rpc error: code = NotFound desc = could not find container \"d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c\": container with ID starting with d8d6dcb581635bc880d7b4b62c31494338fd2c1577f35b7aadf7a8e48ef5751c not found: ID does not exist" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.233228 4869 scope.go:117] "RemoveContainer" containerID="bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042" Sep 29 14:32:46 crc kubenswrapper[4869]: E0929 14:32:46.233539 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042\": container with ID starting with bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042 not found: ID does not exist" containerID="bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042" 
Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.233567 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042"} err="failed to get container status \"bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042\": rpc error: code = NotFound desc = could not find container \"bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042\": container with ID starting with bf2499b3f3be082aba9face4a6b1528ddbfc6bcdc4f612a651401f1425908042 not found: ID does not exist" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.435347 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f672eb9f-1564-4f6d-8b26-f413b85f10da" (UID: "f672eb9f-1564-4f6d-8b26-f413b85f10da"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.469068 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f672eb9f-1564-4f6d-8b26-f413b85f10da-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.766031 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:46 crc kubenswrapper[4869]: I0929 14:32:46.776523 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v2pg2"] Sep 29 14:32:48 crc kubenswrapper[4869]: I0929 14:32:48.258175 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" path="/var/lib/kubelet/pods/f672eb9f-1564-4f6d-8b26-f413b85f10da/volumes" Sep 29 14:32:53 crc kubenswrapper[4869]: I0929 14:32:53.242564 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:32:53 crc kubenswrapper[4869]: E0929 14:32:53.243477 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:33:04 crc kubenswrapper[4869]: I0929 14:33:04.249160 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:33:04 crc kubenswrapper[4869]: E0929 14:33:04.250184 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:33:17 crc kubenswrapper[4869]: I0929 14:33:17.242345 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:33:17 crc kubenswrapper[4869]: E0929 14:33:17.244477 4869 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:33:31 crc kubenswrapper[4869]: I0929 14:33:31.242887 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:33:31 crc kubenswrapper[4869]: E0929 14:33:31.244193 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:33:46 crc kubenswrapper[4869]: I0929 14:33:46.242715 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:33:46 crc kubenswrapper[4869]: E0929 14:33:46.245073 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:34:00 crc kubenswrapper[4869]: I0929 14:34:00.244875 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:34:00 crc kubenswrapper[4869]: E0929 14:34:00.246166 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:34:13 crc kubenswrapper[4869]: I0929 14:34:13.242913 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:34:13 crc kubenswrapper[4869]: E0929 14:34:13.244093 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:34:24 crc kubenswrapper[4869]: I0929 14:34:24.253214 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:34:24 crc kubenswrapper[4869]: E0929 14:34:24.254297 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:34:38 crc kubenswrapper[4869]: I0929 14:34:38.242874 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:34:38 crc kubenswrapper[4869]: E0929 14:34:38.245158 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:34:50 crc kubenswrapper[4869]: I0929 14:34:50.242157 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:34:50 crc kubenswrapper[4869]: E0929 14:34:50.243163 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:35:05 crc kubenswrapper[4869]: I0929 14:35:05.242749 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:35:05 crc kubenswrapper[4869]: E0929 14:35:05.243757 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:35:15 crc kubenswrapper[4869]: I0929 14:35:15.733463 4869 generic.go:334] "Generic (PLEG): container finished" podID="2a0f54ec-6749-43a1-81d5-064a45a1d715" containerID="b6628f8193dd1c7e66833ee1ccbae48a2b3e6f89eaa0d129ff95104175ef07be" exitCode=0 Sep 29 14:35:15 crc kubenswrapper[4869]: I0929 14:35:15.734191 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" event={"ID":"2a0f54ec-6749-43a1-81d5-064a45a1d715","Type":"ContainerDied","Data":"b6628f8193dd1c7e66833ee1ccbae48a2b3e6f89eaa0d129ff95104175ef07be"} Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.179223 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307547 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307667 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307731 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307786 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307844 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307924 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.307959 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdp9m\" (UniqueName: \"kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.308091 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.308163 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.308193 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: 
\"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.308263 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1\") pod \"2a0f54ec-6749-43a1-81d5-064a45a1d715\" (UID: \"2a0f54ec-6749-43a1-81d5-064a45a1d715\") " Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.315304 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m" (OuterVolumeSpecName: "kube-api-access-sdp9m") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "kube-api-access-sdp9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.315459 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph" (OuterVolumeSpecName: "ceph") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.323882 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.337819 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.340167 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.345441 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "ceph-nova-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.348325 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.351745 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory" (OuterVolumeSpecName: "inventory") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.352702 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.354072 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.354511 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2a0f54ec-6749-43a1-81d5-064a45a1d715" (UID: "2a0f54ec-6749-43a1-81d5-064a45a1d715"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.410953 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411300 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411356 4869 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411411 4869 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411464 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411515 4869 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411574 4869 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411655 4869 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411722 4869 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411785 4869 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2a0f54ec-6749-43a1-81d5-064a45a1d715-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.411842 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdp9m\" (UniqueName: \"kubernetes.io/projected/2a0f54ec-6749-43a1-81d5-064a45a1d715-kube-api-access-sdp9m\") on node \"crc\" DevicePath \"\"" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.753999 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" event={"ID":"2a0f54ec-6749-43a1-81d5-064a45a1d715","Type":"ContainerDied","Data":"0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183"} Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.754050 4869 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="0a9af247f6529ac5a4cbae4aa0703bb1d2415c75962b71d49d041a750162f183" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.754337 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.951136 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66"] Sep 29 14:35:17 crc kubenswrapper[4869]: E0929 14:35:17.952035 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a0f54ec-6749-43a1-81d5-064a45a1d715" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952058 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a0f54ec-6749-43a1-81d5-064a45a1d715" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Sep 29 14:35:17 crc kubenswrapper[4869]: E0929 14:35:17.952079 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="extract-content" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952086 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="extract-content" Sep 29 14:35:17 crc kubenswrapper[4869]: E0929 14:35:17.952100 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="extract-utilities" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952107 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="extract-utilities" Sep 29 14:35:17 crc kubenswrapper[4869]: E0929 14:35:17.952130 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="registry-server" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952139 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="registry-server" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952385 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f672eb9f-1564-4f6d-8b26-f413b85f10da" containerName="registry-server" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.952407 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a0f54ec-6749-43a1-81d5-064a45a1d715" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.953484 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.957591 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.957809 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.958225 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.958550 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.958720 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-sfkgv" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.959185 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Sep 29 14:35:17 crc kubenswrapper[4869]: I0929 14:35:17.973006 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66"] Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.126738 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.126815 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.126850 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fjmn\" (UniqueName: \"kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.127019 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.127134 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: 
\"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.127396 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.127504 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.127757 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229725 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229800 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fjmn\" (UniqueName: \"kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229832 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229863 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229903 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229925 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.229974 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.230067 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.234643 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.235052 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.235531 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.235786 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.236594 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.237013 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.237775 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.250404 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fjmn\" (UniqueName: \"kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7vx66\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.277399 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:35:18 crc kubenswrapper[4869]: I0929 14:35:18.840976 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66"] Sep 29 14:35:19 crc kubenswrapper[4869]: I0929 14:35:19.776075 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" event={"ID":"7417019a-14a4-45f5-99d1-b7b84efb665e","Type":"ContainerStarted","Data":"c2c3b1e7ef3417e63fb48a57390a3d69faeee5dc6b87acd96b654fab2dc9878c"} Sep 29 14:35:19 crc kubenswrapper[4869]: I0929 14:35:19.776561 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" event={"ID":"7417019a-14a4-45f5-99d1-b7b84efb665e","Type":"ContainerStarted","Data":"842a0d3ea7dbe7a93a6f787820941d223b180621645343e2cacb89236a56587e"} Sep 29 14:35:19 crc kubenswrapper[4869]: I0929 14:35:19.808228 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" podStartSLOduration=2.375374064 podStartE2EDuration="2.808200232s" podCreationTimestamp="2025-09-29 14:35:17 +0000 UTC" firstStartedPulling="2025-09-29 14:35:18.850312685 +0000 UTC m=+3245.290957005" lastFinishedPulling="2025-09-29 14:35:19.283138853 +0000 UTC m=+3245.723783173" observedRunningTime="2025-09-29 14:35:19.797980365 +0000 UTC m=+3246.238624735" watchObservedRunningTime="2025-09-29 14:35:19.808200232 +0000 UTC m=+3246.248844542" Sep 29 14:35:20 crc kubenswrapper[4869]: I0929 14:35:20.242371 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:35:20 crc kubenswrapper[4869]: E0929 14:35:20.243337 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:35:31 crc kubenswrapper[4869]: I0929 14:35:31.241892 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:35:31 crc kubenswrapper[4869]: E0929 14:35:31.242895 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:35:43 crc kubenswrapper[4869]: I0929 14:35:43.249550 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:35:43 crc kubenswrapper[4869]: E0929 14:35:43.250535 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:35:55 crc kubenswrapper[4869]: I0929 14:35:55.242355 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:35:55 crc kubenswrapper[4869]: E0929 14:35:55.243470 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:36:06 crc kubenswrapper[4869]: I0929 14:36:06.242795 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:36:06 crc kubenswrapper[4869]: E0929 14:36:06.243667 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:36:19 crc kubenswrapper[4869]: I0929 14:36:19.242252 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:36:19 crc kubenswrapper[4869]: E0929 14:36:19.243312 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:36:34 crc kubenswrapper[4869]: I0929 14:36:34.250639 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:36:34 crc kubenswrapper[4869]: I0929 14:36:34.597311 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568"} Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.179799 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.183177 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.195439 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.330276 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.330354 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.330433 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdxnf\" (UniqueName: \"kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.432398 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.432473 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.433055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities\") pod 
\"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.433118 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.433663 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdxnf\" (UniqueName: \"kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.454599 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdxnf\" (UniqueName: \"kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf\") pod \"redhat-marketplace-kf9mn\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.508232 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:03 crc kubenswrapper[4869]: I0929 14:37:03.999552 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:04 crc kubenswrapper[4869]: I0929 14:37:04.921176 4869 generic.go:334] "Generic (PLEG): container finished" podID="414c621a-7fa3-43be-8f25-26233c10b098" containerID="3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46" exitCode=0 Sep 29 14:37:04 crc kubenswrapper[4869]: I0929 14:37:04.921271 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerDied","Data":"3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46"} Sep 29 14:37:04 crc kubenswrapper[4869]: I0929 14:37:04.921706 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerStarted","Data":"e5ffd311cfe86b96102be27808d37780215439d3a78df9a36c185f04db7b67e0"} Sep 29 14:37:04 crc kubenswrapper[4869]: I0929 14:37:04.923893 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:37:05 crc kubenswrapper[4869]: I0929 14:37:05.934594 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerStarted","Data":"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79"} Sep 29 14:37:06 crc kubenswrapper[4869]: I0929 14:37:06.960744 4869 generic.go:334] "Generic (PLEG): container finished" podID="414c621a-7fa3-43be-8f25-26233c10b098" containerID="fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79" exitCode=0 Sep 29 14:37:06 crc kubenswrapper[4869]: I0929 14:37:06.960795 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" 
event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerDied","Data":"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79"} Sep 29 14:37:07 crc kubenswrapper[4869]: I0929 14:37:07.978240 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerStarted","Data":"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372"} Sep 29 14:37:08 crc kubenswrapper[4869]: I0929 14:37:08.006345 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kf9mn" podStartSLOduration=2.518955799 podStartE2EDuration="5.00631625s" podCreationTimestamp="2025-09-29 14:37:03 +0000 UTC" firstStartedPulling="2025-09-29 14:37:04.923513959 +0000 UTC m=+3351.364158289" lastFinishedPulling="2025-09-29 14:37:07.41087442 +0000 UTC m=+3353.851518740" observedRunningTime="2025-09-29 14:37:08.002070139 +0000 UTC m=+3354.442714459" watchObservedRunningTime="2025-09-29 14:37:08.00631625 +0000 UTC m=+3354.446960570" Sep 29 14:37:13 crc kubenswrapper[4869]: I0929 14:37:13.508505 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:13 crc kubenswrapper[4869]: I0929 14:37:13.508907 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:13 crc kubenswrapper[4869]: I0929 14:37:13.561634 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:14 crc kubenswrapper[4869]: I0929 14:37:14.100421 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:14 crc kubenswrapper[4869]: I0929 14:37:14.157507 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.062919 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kf9mn" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="registry-server" containerID="cri-o://0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372" gracePeriod=2 Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.564374 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.649252 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdxnf\" (UniqueName: \"kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf\") pod \"414c621a-7fa3-43be-8f25-26233c10b098\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.649777 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities\") pod \"414c621a-7fa3-43be-8f25-26233c10b098\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.649832 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content\") pod \"414c621a-7fa3-43be-8f25-26233c10b098\" (UID: \"414c621a-7fa3-43be-8f25-26233c10b098\") " Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.651097 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities" (OuterVolumeSpecName: "utilities") pod "414c621a-7fa3-43be-8f25-26233c10b098" (UID: "414c621a-7fa3-43be-8f25-26233c10b098"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.662089 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf" (OuterVolumeSpecName: "kube-api-access-gdxnf") pod "414c621a-7fa3-43be-8f25-26233c10b098" (UID: "414c621a-7fa3-43be-8f25-26233c10b098"). InnerVolumeSpecName "kube-api-access-gdxnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.667573 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "414c621a-7fa3-43be-8f25-26233c10b098" (UID: "414c621a-7fa3-43be-8f25-26233c10b098"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.752667 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdxnf\" (UniqueName: \"kubernetes.io/projected/414c621a-7fa3-43be-8f25-26233c10b098-kube-api-access-gdxnf\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.752707 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:16 crc kubenswrapper[4869]: I0929 14:37:16.752721 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/414c621a-7fa3-43be-8f25-26233c10b098-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.077398 4869 generic.go:334] "Generic (PLEG): container finished" podID="414c621a-7fa3-43be-8f25-26233c10b098" containerID="0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372" exitCode=0 Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.077454 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerDied","Data":"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372"} Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.077859 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kf9mn" event={"ID":"414c621a-7fa3-43be-8f25-26233c10b098","Type":"ContainerDied","Data":"e5ffd311cfe86b96102be27808d37780215439d3a78df9a36c185f04db7b67e0"} Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.077885 4869 scope.go:117] "RemoveContainer" containerID="0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.077512 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kf9mn" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.119735 4869 scope.go:117] "RemoveContainer" containerID="fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.126518 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.138595 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kf9mn"] Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.147128 4869 scope.go:117] "RemoveContainer" containerID="3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.190186 4869 scope.go:117] "RemoveContainer" containerID="0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372" Sep 29 14:37:17 crc kubenswrapper[4869]: E0929 14:37:17.190853 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372\": container with ID starting with 0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372 not found: ID does not exist" containerID="0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.190906 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372"} err="failed to get container status \"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372\": rpc error: code = NotFound desc = could not find container \"0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372\": container with ID starting with 0a3c3a48da76a730666fb86b5aaa74c96904fa64b61dd6ac5e14b31304930372 not found: ID does not exist" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.190931 4869 scope.go:117] "RemoveContainer" containerID="fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79" Sep 29 14:37:17 crc kubenswrapper[4869]: E0929 14:37:17.191216 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79\": container with ID starting with fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79 not found: ID does not exist" containerID="fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.191281 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79"} err="failed to get container status \"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79\": rpc error: code = NotFound desc = could not find container \"fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79\": container with ID starting with fd4b24ba59f21c0f3e966255db56389c04807f6d60ccddd54fb066ce03124e79 not found: ID does not exist" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.191302 4869 scope.go:117] "RemoveContainer" containerID="3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46" Sep 29 14:37:17 crc kubenswrapper[4869]: E0929 14:37:17.192113 4869 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46\": container with ID starting with 3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46 not found: ID does not exist" containerID="3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46" Sep 29 14:37:17 crc kubenswrapper[4869]: I0929 14:37:17.192180 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46"} err="failed to get container status \"3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46\": rpc error: code = NotFound desc = could not find container \"3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46\": container with ID starting with 3e195bb57abf6166d16b061990749d691941d4d15c77fad0e5941e6b3a783a46 not found: ID does not exist" Sep 29 14:37:18 crc kubenswrapper[4869]: I0929 14:37:18.254011 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="414c621a-7fa3-43be-8f25-26233c10b098" path="/var/lib/kubelet/pods/414c621a-7fa3-43be-8f25-26233c10b098/volumes" Sep 29 14:37:45 crc kubenswrapper[4869]: I0929 14:37:45.406497 4869 generic.go:334] "Generic (PLEG): container finished" podID="7417019a-14a4-45f5-99d1-b7b84efb665e" containerID="c2c3b1e7ef3417e63fb48a57390a3d69faeee5dc6b87acd96b654fab2dc9878c" exitCode=0 Sep 29 14:37:45 crc kubenswrapper[4869]: I0929 14:37:45.407372 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" event={"ID":"7417019a-14a4-45f5-99d1-b7b84efb665e","Type":"ContainerDied","Data":"c2c3b1e7ef3417e63fb48a57390a3d69faeee5dc6b87acd96b654fab2dc9878c"} Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.049975 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124360 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124455 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124570 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124743 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124807 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fjmn\" (UniqueName: \"kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124835 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124869 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.124894 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1\") pod \"7417019a-14a4-45f5-99d1-b7b84efb665e\" (UID: \"7417019a-14a4-45f5-99d1-b7b84efb665e\") " Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.134128 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn" (OuterVolumeSpecName: "kube-api-access-6fjmn") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "kube-api-access-6fjmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.134567 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph" (OuterVolumeSpecName: "ceph") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.143756 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.159486 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.161377 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.169795 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.179272 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.182390 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory" (OuterVolumeSpecName: "inventory") pod "7417019a-14a4-45f5-99d1-b7b84efb665e" (UID: "7417019a-14a4-45f5-99d1-b7b84efb665e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227513 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ssh-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227560 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fjmn\" (UniqueName: \"kubernetes.io/projected/7417019a-14a4-45f5-99d1-b7b84efb665e-kube-api-access-6fjmn\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227575 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227629 4869 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-inventory\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227641 4869 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227654 4869 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227667 4869 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.227677 4869 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7417019a-14a4-45f5-99d1-b7b84efb665e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.430423 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" event={"ID":"7417019a-14a4-45f5-99d1-b7b84efb665e","Type":"ContainerDied","Data":"842a0d3ea7dbe7a93a6f787820941d223b180621645343e2cacb89236a56587e"} Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.430984 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="842a0d3ea7dbe7a93a6f787820941d223b180621645343e2cacb89236a56587e" Sep 29 14:37:47 crc kubenswrapper[4869]: I0929 14:37:47.430681 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7vx66" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.874539 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Sep 29 14:38:06 crc kubenswrapper[4869]: E0929 14:38:06.875596 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7417019a-14a4-45f5-99d1-b7b84efb665e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875625 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7417019a-14a4-45f5-99d1-b7b84efb665e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Sep 29 14:38:06 crc kubenswrapper[4869]: E0929 14:38:06.875640 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="extract-content" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875646 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="extract-content" Sep 29 14:38:06 crc kubenswrapper[4869]: E0929 14:38:06.875681 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="registry-server" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875687 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="registry-server" Sep 29 14:38:06 crc kubenswrapper[4869]: E0929 14:38:06.875700 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="extract-utilities" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875707 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="extract-utilities" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875894 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="414c621a-7fa3-43be-8f25-26233c10b098" containerName="registry-server" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.875926 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7417019a-14a4-45f5-99d1-b7b84efb665e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.877014 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.879127 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.879904 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.894967 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.900348 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.908080 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.910852 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Sep 29 14:38:06 crc kubenswrapper[4869]: I0929 14:38:06.962104 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039542 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039635 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-dev\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039658 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039698 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039919 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-dev\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039944 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039969 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.039995 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-nvme\") pod \"cinder-backup-0\" (UID: 
\"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040060 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040091 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-run\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040113 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-scripts\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040133 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-ceph\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040273 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-sys\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040399 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040452 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040507 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm7bt\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-kube-api-access-lm7bt\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040559 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f5nn\" (UniqueName: \"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-kube-api-access-6f5nn\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040586 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-sys\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040630 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040662 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-run\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040706 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-lib-modules\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040773 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040808 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.040872 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041029 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041110 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041134 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041267 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041305 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041383 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.041528 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143464 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-dev\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143514 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143542 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143563 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-nvme\") pod \"cinder-backup-0\" (UID: 
\"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143580 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143594 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-run\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143636 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-scripts\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143659 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-ceph\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143680 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143707 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-sys\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143730 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143767 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143789 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lm7bt\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-kube-api-access-lm7bt\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143826 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f5nn\" (UniqueName: 
\"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-kube-api-access-6f5nn\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143847 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-sys\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143868 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143886 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-run\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143917 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-lib-modules\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143937 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143954 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.143980 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144004 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144024 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144038 
4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144068 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144086 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144103 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144138 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144160 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144191 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-dev\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144213 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144239 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144853 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: 
\"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.144937 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145027 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-run\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145054 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-lib-modules\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145235 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145403 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-sys\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145447 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145836 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145885 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-sys\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.145921 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-dev\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146334 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: 
\"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146458 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146629 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146744 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-run\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146835 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-nvme\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146908 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-dev\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.146963 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/482223e1-1a82-46c5-808d-ac8f963c7c09-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.147060 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.147438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.147521 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.156486 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-ceph\") pod \"cinder-volume-volume1-0\" (UID: 
\"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.156824 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.158023 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.158929 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-config-data\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.159245 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.163393 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-ceph\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.169141 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.170840 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/482223e1-1a82-46c5-808d-ac8f963c7c09-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.171231 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.171318 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-scripts\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.180352 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f5nn\" (UniqueName: \"kubernetes.io/projected/482223e1-1a82-46c5-808d-ac8f963c7c09-kube-api-access-6f5nn\") 
pod \"cinder-volume-volume1-0\" (UID: \"482223e1-1a82-46c5-808d-ac8f963c7c09\") " pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.184979 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lm7bt\" (UniqueName: \"kubernetes.io/projected/404cac7d-3af0-4f4f-bdd8-fc3eec4b512a-kube-api-access-lm7bt\") pod \"cinder-backup-0\" (UID: \"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a\") " pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.217693 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.221419 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume2-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.227583 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.235034 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume2-config-data" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.248943 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume2-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.250485 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.348867 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data-custom\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.349298 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-scripts\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.349362 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-lib-modules\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.349384 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-iscsi\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.349413 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj4ft\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-kube-api-access-wj4ft\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.349900 
4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-combined-ca-bundle\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350002 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-dev\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350083 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-run\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350156 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-sys\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350240 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-ceph\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350267 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-machine-id\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350336 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-nvme\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.350358 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.351002 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.351169 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-lib-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.351258 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-brick\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.453801 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-dev\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.453924 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-run\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.453979 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-sys\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454010 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-machine-id\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454026 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-ceph\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454079 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-nvme\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454096 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454143 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data\") pod \"cinder-volume-volume2-0\" (UID: 
\"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454179 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-lib-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454232 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-brick\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454270 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data-custom\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454306 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-scripts\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454341 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-lib-modules\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454373 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-iscsi\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454397 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj4ft\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-kube-api-access-wj4ft\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.454471 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-combined-ca-bundle\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456274 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-dev\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456334 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-run\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456358 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-sys\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456379 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-machine-id\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456555 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-brick\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456690 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-nvme\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.456727 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-locks-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.461819 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-lib-modules\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.468629 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data-custom\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.473830 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-etc-iscsi\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.475038 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-var-lib-cinder\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" 
Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.475258 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-ceph\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.496562 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-config-data\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.514439 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-combined-ca-bundle\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.535767 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj4ft\" (UniqueName: \"kubernetes.io/projected/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-kube-api-access-wj4ft\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.544446 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dafc73c-4956-4f69-92c1-e9bb3957e8fe-scripts\") pod \"cinder-volume-volume2-0\" (UID: \"9dafc73c-4956-4f69-92c1-e9bb3957e8fe\") " pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.552849 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.555137 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.567502 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.567865 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.568094 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-l4w2z" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.568326 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.617992 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.628122 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.630938 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.639268 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.662783 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.662835 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.662884 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.663005 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47kjt\" (UniqueName: \"kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.663050 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.710316 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.728862 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.741852 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.746981 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-87hr5" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.747181 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.747307 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.748025 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.806852 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.807245 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.807304 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.807907 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.807970 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.808441 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.809290 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.810514 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-47kjt\" (UniqueName: \"kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.810983 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.811045 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.811205 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.812228 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.815893 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.827561 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.868094 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.868731 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47kjt\" (UniqueName: \"kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt\") pod \"horizon-7bb49bd5bf-n7hcd\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.906305 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.908473 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.911058 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.920863 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.921202 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.921403 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.921596 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.921754 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.922007 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.921446 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.922296 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.922470 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.925447 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " 
pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.923438 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.925699 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.925831 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lq9cc\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.925957 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.926157 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.926321 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.926501 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.926916 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.927097 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.934004 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:07 crc kubenswrapper[4869]: I0929 14:38:07.940171 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:07.998532 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs\") pod \"horizon-585944bbfc-88glg\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.014686 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037254 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp4tn\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037392 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037424 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037472 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037518 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037550 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.037575 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040664 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lq9cc\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040734 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040773 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040862 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040931 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.040977 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.041003 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.041093 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.041148 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.041337 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.041393 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.042404 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.043286 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.044451 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.045237 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.051812 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.052343 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: 
\"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.052518 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.083111 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lq9cc\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.089055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.116384 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.144957 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.145129 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.145227 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.145309 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.145465 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.146005 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp4tn\" (UniqueName: 
\"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.146277 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.146467 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.146628 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.147447 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.150509 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.150975 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.151712 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.163932 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.166775 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" 
(UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.171481 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.172541 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.201308 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp4tn\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.202688 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.236445 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.292295 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.683755 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume2-0"] Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.723494 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a","Type":"ContainerStarted","Data":"184860fb2aacd17989537aa071185e6f0635be549e6958da08b30c99a042d5a1"} Sep 29 14:38:08 crc kubenswrapper[4869]: I0929 14:38:08.842455 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.090272 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Sep 29 14:38:09 crc kubenswrapper[4869]: W0929 14:38:09.113521 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod482223e1_1a82_46c5_808d_ac8f963c7c09.slice/crio-577c07fcdb469b6e61e58dccc7e264796f430f316b35bebd73461092bc91abe5 WatchSource:0}: Error finding container 577c07fcdb469b6e61e58dccc7e264796f430f316b35bebd73461092bc91abe5: Status 404 returned error can't find the container with id 577c07fcdb469b6e61e58dccc7e264796f430f316b35bebd73461092bc91abe5 Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.201045 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.388969 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.808756 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerStarted","Data":"c093237e5704a4264859ecfd5a1fc02e97da19e8f2d995efbded9185b2eab7d9"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.835238 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerStarted","Data":"334742d7afee70be7856fe2f0f1a73c8408a8d0a7d3ae842f436ae7d2cb5c32f"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.844267 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"482223e1-1a82-46c5-808d-ac8f963c7c09","Type":"ContainerStarted","Data":"577c07fcdb469b6e61e58dccc7e264796f430f316b35bebd73461092bc91abe5"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.859148 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a","Type":"ContainerStarted","Data":"7f17b1c4ba5e48ec1883ea0d57bd58abbf334dc687a6f89ec754a52efa6a0dc9"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.867934 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerStarted","Data":"f471f4982d9c22f94d37a3bb0cbe50dc741d0bb1bfd7f15239a1bdb89420de4f"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.877985 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume2-0" 
event={"ID":"9dafc73c-4956-4f69-92c1-e9bb3957e8fe","Type":"ContainerStarted","Data":"2fa6a6577ff49e9984d4a8ff5b08e37c8f3158c2b46f4598be6315d7fb54bafe"} Sep 29 14:38:09 crc kubenswrapper[4869]: I0929 14:38:09.885079 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.656243 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.682061 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.748557 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.749351 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.753510 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.775903 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.814252 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.885312 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.889436 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.889809 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.890012 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.890214 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.890372 4869 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69rlg\" (UniqueName: \"kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.890518 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.916079 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-74f556cb8d-4pwqg"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.918136 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.957858 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-74f556cb8d-4pwqg"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.970858 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.985674 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"404cac7d-3af0-4f4f-bdd8-fc3eec4b512a","Type":"ContainerStarted","Data":"8d9e3677436562d274101726d0c351b8f27dded29a458496c8f67aefe70ea506"} Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.993901 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.994340 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.994599 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.994818 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.995134 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 
14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.995416 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69rlg\" (UniqueName: \"kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:10 crc kubenswrapper[4869]: I0929 14:38:10.995584 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.002067 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.003564 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.007357 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.007976 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume2-0" event={"ID":"9dafc73c-4956-4f69-92c1-e9bb3957e8fe","Type":"ContainerStarted","Data":"e7c52f1f9b5afd26c471e26e7e608791befdd3ae0f100c8ffd9a1d965a17375d"} Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.008326 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.009721 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.010570 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.024215 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=4.147337465 podStartE2EDuration="5.024191594s" podCreationTimestamp="2025-09-29 
14:38:06 +0000 UTC" firstStartedPulling="2025-09-29 14:38:08.240064063 +0000 UTC m=+3414.680708383" lastFinishedPulling="2025-09-29 14:38:09.116918192 +0000 UTC m=+3415.557562512" observedRunningTime="2025-09-29 14:38:11.022998122 +0000 UTC m=+3417.463642462" watchObservedRunningTime="2025-09-29 14:38:11.024191594 +0000 UTC m=+3417.464835914" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.026979 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerStarted","Data":"1ee97de597f8fa6499f12ee21a0eb181f0ec46b5c340c1ae3b3fcf78853a61d4"} Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.035746 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"482223e1-1a82-46c5-808d-ac8f963c7c09","Type":"ContainerStarted","Data":"ed6e799bcd78e37a35e65bb7c848554818444b72bcb05e471aa8aa9f3bb970f5"} Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.048366 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69rlg\" (UniqueName: \"kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg\") pod \"horizon-c5bdb566d-s22fb\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100109 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjjkv\" (UniqueName: \"kubernetes.io/projected/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-kube-api-access-rjjkv\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100238 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-secret-key\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100277 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-config-data\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100300 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-logs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100443 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-scripts\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100474 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-tls-certs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.100504 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-combined-ca-bundle\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.172539 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202528 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-config-data\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202592 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-logs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202717 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-scripts\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202750 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-tls-certs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202785 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-combined-ca-bundle\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202816 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjjkv\" (UniqueName: \"kubernetes.io/projected/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-kube-api-access-rjjkv\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.202890 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-secret-key\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.206867 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-scripts\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.207055 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-config-data\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.207234 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-logs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.215242 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-combined-ca-bundle\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.215953 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-secret-key\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.219444 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-horizon-tls-certs\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.232552 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjjkv\" (UniqueName: \"kubernetes.io/projected/696c6bef-f1c0-4d67-9ca8-ccb6bb489141-kube-api-access-rjjkv\") pod \"horizon-74f556cb8d-4pwqg\" (UID: \"696c6bef-f1c0-4d67-9ca8-ccb6bb489141\") " pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.281866 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:11 crc kubenswrapper[4869]: I0929 14:38:11.852338 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:38:11 crc kubenswrapper[4869]: W0929 14:38:11.955991 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e3518b4_923d_4aea_abb6_f0acc24b11d6.slice/crio-36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655 WatchSource:0}: Error finding container 36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655: Status 404 returned error can't find the container with id 36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655 Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.091013 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerStarted","Data":"36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655"} Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.103915 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"482223e1-1a82-46c5-808d-ac8f963c7c09","Type":"ContainerStarted","Data":"15516165d820aa7018ce076cc24921bedee322a1ad642206c6f95c3861864e85"} Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.111838 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerStarted","Data":"76983e587a1fd4b54a1b93fa8c695dc11aeba2c397291a3e48c8fa84b9078cb5"} Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.145869 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-74f556cb8d-4pwqg"] Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.149226 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume2-0" event={"ID":"9dafc73c-4956-4f69-92c1-e9bb3957e8fe","Type":"ContainerStarted","Data":"bd08f5adf80828caf3a646581c0a8cb6723027dc60e78a012ec923d8e358d6be"} Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.151468 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=5.338223635 podStartE2EDuration="6.15144666s" podCreationTimestamp="2025-09-29 14:38:06 +0000 UTC" firstStartedPulling="2025-09-29 14:38:09.121121452 +0000 UTC m=+3415.561765762" lastFinishedPulling="2025-09-29 14:38:09.934344467 +0000 UTC m=+3416.374988787" observedRunningTime="2025-09-29 14:38:12.138239165 +0000 UTC m=+3418.578883485" watchObservedRunningTime="2025-09-29 14:38:12.15144666 +0000 UTC m=+3418.592090980" Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.159347 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerStarted","Data":"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3"} Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.195731 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume2-0" podStartSLOduration=4.438792275 podStartE2EDuration="5.195699337s" podCreationTimestamp="2025-09-29 14:38:07 +0000 UTC" firstStartedPulling="2025-09-29 14:38:09.052062226 +0000 UTC m=+3415.492706546" lastFinishedPulling="2025-09-29 14:38:09.808969288 +0000 UTC 
m=+3416.249613608" observedRunningTime="2025-09-29 14:38:12.183235841 +0000 UTC m=+3418.623880151" watchObservedRunningTime="2025-09-29 14:38:12.195699337 +0000 UTC m=+3418.636343657" Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.219524 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.287593 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:12 crc kubenswrapper[4869]: I0929 14:38:12.713849 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.207377 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerStarted","Data":"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e"} Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.207647 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-log" containerID="cri-o://97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" gracePeriod=30 Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.207784 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-httpd" containerID="cri-o://86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" gracePeriod=30 Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.220367 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerStarted","Data":"bda820b8e0583f52e6893468f957b1a2045084ddd37bb07522f5bd09eeda90dd"} Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.220525 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-log" containerID="cri-o://76983e587a1fd4b54a1b93fa8c695dc11aeba2c397291a3e48c8fa84b9078cb5" gracePeriod=30 Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.220556 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-httpd" containerID="cri-o://bda820b8e0583f52e6893468f957b1a2045084ddd37bb07522f5bd09eeda90dd" gracePeriod=30 Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.238012 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74f556cb8d-4pwqg" event={"ID":"696c6bef-f1c0-4d67-9ca8-ccb6bb489141","Type":"ContainerStarted","Data":"4edcfd1b7ed2e06ca9f40f958caaeebc31f2e8793f76ae8b5fca8e61370616b7"} Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.256685 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.25665263 podStartE2EDuration="6.25665263s" podCreationTimestamp="2025-09-29 14:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:38:13.235846506 
+0000 UTC m=+3419.676490826" watchObservedRunningTime="2025-09-29 14:38:13.25665263 +0000 UTC m=+3419.697296950" Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.295378 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.295342971 podStartE2EDuration="6.295342971s" podCreationTimestamp="2025-09-29 14:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:38:13.284014275 +0000 UTC m=+3419.724658615" watchObservedRunningTime="2025-09-29 14:38:13.295342971 +0000 UTC m=+3419.735987291" Sep 29 14:38:13 crc kubenswrapper[4869]: I0929 14:38:13.987462 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126206 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126397 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp4tn\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126502 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126534 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126586 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.126649 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.127495 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs" (OuterVolumeSpecName: "logs") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.127869 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.127952 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.128075 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.128132 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts\") pod \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\" (UID: \"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.139397 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph" (OuterVolumeSpecName: "ceph") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.139602 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn" (OuterVolumeSpecName: "kube-api-access-tp4tn") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "kube-api-access-tp4tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.139754 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.145994 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts" (OuterVolumeSpecName: "scripts") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150692 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp4tn\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-kube-api-access-tp4tn\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150734 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150746 4869 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-httpd-run\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150756 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150791 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.150802 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.220271 4869 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.237898 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.253470 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.253500 4869 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.255369 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerID="86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" exitCode=143 Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.255414 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerID="97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" exitCode=143 Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.255579 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.263368 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281042 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerDied","Data":"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e"} Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerDied","Data":"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3"} Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281120 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2","Type":"ContainerDied","Data":"1ee97de597f8fa6499f12ee21a0eb181f0ec46b5c340c1ae3b3fcf78853a61d4"} Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281142 4869 scope.go:117] "RemoveContainer" containerID="86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281176 4869 generic.go:334] "Generic (PLEG): container finished" podID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerID="bda820b8e0583f52e6893468f957b1a2045084ddd37bb07522f5bd09eeda90dd" exitCode=143 Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281219 4869 generic.go:334] "Generic (PLEG): container finished" podID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerID="76983e587a1fd4b54a1b93fa8c695dc11aeba2c397291a3e48c8fa84b9078cb5" exitCode=143 Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.281745 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerDied","Data":"bda820b8e0583f52e6893468f957b1a2045084ddd37bb07522f5bd09eeda90dd"} Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.286828 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerDied","Data":"76983e587a1fd4b54a1b93fa8c695dc11aeba2c397291a3e48c8fa84b9078cb5"} Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.315025 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.355965 4869 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.370281 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data" (OuterVolumeSpecName: "config-data") pod "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" (UID: "e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.457366 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.457545 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.457931 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lq9cc\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.457976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.458030 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.458063 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.458187 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.458375 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc 
kubenswrapper[4869]: I0929 14:38:14.458472 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run\") pod \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\" (UID: \"9412cadc-fa96-4a40-b71e-7df2f560cc9a\") " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.459459 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.460433 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.465841 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.466101 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs" (OuterVolumeSpecName: "logs") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.467039 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph" (OuterVolumeSpecName: "ceph") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.468342 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc" (OuterVolumeSpecName: "kube-api-access-lq9cc") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "kube-api-access-lq9cc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.469871 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts" (OuterVolumeSpecName: "scripts") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.539216 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.550877 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data" (OuterVolumeSpecName: "config-data") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.559105 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9412cadc-fa96-4a40-b71e-7df2f560cc9a" (UID: "9412cadc-fa96-4a40-b71e-7df2f560cc9a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562397 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562440 4869 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562451 4869 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-httpd-run\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562463 4869 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-ceph\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562471 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562481 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lq9cc\" (UniqueName: \"kubernetes.io/projected/9412cadc-fa96-4a40-b71e-7df2f560cc9a-kube-api-access-lq9cc\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562490 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9412cadc-fa96-4a40-b71e-7df2f560cc9a-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562498 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9412cadc-fa96-4a40-b71e-7df2f560cc9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.562576 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.588708 4869 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 
14:38:14.667901 4869 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.712080 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.736512 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.769234 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.773629 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.773697 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.773740 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.773757 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.773819 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.773836 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.773860 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.773869 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.793997 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.794877 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-log" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.794897 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.794914 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" containerName="glance-httpd" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.795856 4869 scope.go:117] "RemoveContainer" containerID="97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.799197 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.806462 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.807196 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.809993 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.850353 4869 scope.go:117] "RemoveContainer" containerID="86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.858584 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e\": container with ID starting with 86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e not found: ID does not exist" containerID="86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.858821 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e"} err="failed to get container status \"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e\": rpc error: code = NotFound desc = could not find container \"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e\": container with ID starting with 86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e not found: ID does not exist" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.858864 4869 scope.go:117] "RemoveContainer" containerID="97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" Sep 29 14:38:14 crc kubenswrapper[4869]: E0929 14:38:14.859363 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3\": container with ID starting with 97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3 not found: ID does not exist" containerID="97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.859385 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3"} err="failed to get container status \"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3\": rpc error: code = NotFound desc = could not find container \"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3\": container with ID starting with 97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3 not found: ID does not exist" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.859400 4869 scope.go:117] "RemoveContainer" containerID="86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.859937 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e"} err="failed to get container status 
\"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e\": rpc error: code = NotFound desc = could not find container \"86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e\": container with ID starting with 86bee900548e18c700253fbbc3277e25cd26025ee7ffc800bae6f5527ade5b7e not found: ID does not exist" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.859957 4869 scope.go:117] "RemoveContainer" containerID="97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.863459 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3"} err="failed to get container status \"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3\": rpc error: code = NotFound desc = could not find container \"97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3\": container with ID starting with 97ab7de3fe5ec8f5aa87766312e58ce9e662aeedb00b8b1ae04fe3f49b516ed3 not found: ID does not exist" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.874870 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.874986 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875028 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwm2d\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-kube-api-access-jwm2d\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875067 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875101 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875156 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 
14:38:14.875187 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875218 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.875310 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-logs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.980733 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981176 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981293 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-logs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981319 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981382 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981422 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwm2d\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-kube-api-access-jwm2d\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981470 4869 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981505 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.981569 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.982132 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.982727 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:14 crc kubenswrapper[4869]: I0929 14:38:14.983222 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-logs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:14.993867 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.016430 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwm2d\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-kube-api-access-jwm2d\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.016765 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-ceph\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.017476 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.017937 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.032520 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07fe876e-5a7e-48ca-b91b-44c5fc9129b2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.102022 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"07fe876e-5a7e-48ca-b91b-44c5fc9129b2\") " pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.144260 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.346027 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9412cadc-fa96-4a40-b71e-7df2f560cc9a","Type":"ContainerDied","Data":"f471f4982d9c22f94d37a3bb0cbe50dc741d0bb1bfd7f15239a1bdb89420de4f"} Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.346093 4869 scope.go:117] "RemoveContainer" containerID="bda820b8e0583f52e6893468f957b1a2045084ddd37bb07522f5bd09eeda90dd" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.346259 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.463595 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.482981 4869 scope.go:117] "RemoveContainer" containerID="76983e587a1fd4b54a1b93fa8c695dc11aeba2c397291a3e48c8fa84b9078cb5" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.492857 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.520510 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.523352 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.528088 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.528423 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.555143 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.708863 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-config-data\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.708930 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.708959 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-logs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.708986 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.709046 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-scripts\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.709071 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.709141 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.709183 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-cnkvl\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-kube-api-access-cnkvl\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.709216 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-ceph\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.811254 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.811906 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-scripts\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.811947 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812045 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812102 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnkvl\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-kube-api-access-cnkvl\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812136 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-ceph\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812347 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-config-data\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812389 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812415 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-logs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.812565 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.813023 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6930ce7-8080-4396-8b97-92edef318edf-logs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.813571 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.826725 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.834017 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-scripts\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.835662 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.837982 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6930ce7-8080-4396-8b97-92edef318edf-config-data\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.846656 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnkvl\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-kube-api-access-cnkvl\") pod \"glance-default-external-api-0\" (UID: 
\"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.865490 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d6930ce7-8080-4396-8b97-92edef318edf-ceph\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.872502 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"d6930ce7-8080-4396-8b97-92edef318edf\") " pod="openstack/glance-default-external-api-0" Sep 29 14:38:15 crc kubenswrapper[4869]: I0929 14:38:15.880927 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Sep 29 14:38:16 crc kubenswrapper[4869]: I0929 14:38:16.169295 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Sep 29 14:38:16 crc kubenswrapper[4869]: W0929 14:38:16.170560 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07fe876e_5a7e_48ca_b91b_44c5fc9129b2.slice/crio-43aba79b67228d07aa91ec466d7ca61d17d32c1ce83079fde899f14e4a4dc4e6 WatchSource:0}: Error finding container 43aba79b67228d07aa91ec466d7ca61d17d32c1ce83079fde899f14e4a4dc4e6: Status 404 returned error can't find the container with id 43aba79b67228d07aa91ec466d7ca61d17d32c1ce83079fde899f14e4a4dc4e6 Sep 29 14:38:16 crc kubenswrapper[4869]: I0929 14:38:16.276962 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9412cadc-fa96-4a40-b71e-7df2f560cc9a" path="/var/lib/kubelet/pods/9412cadc-fa96-4a40-b71e-7df2f560cc9a/volumes" Sep 29 14:38:16 crc kubenswrapper[4869]: I0929 14:38:16.278552 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2" path="/var/lib/kubelet/pods/e6c89e04-b52b-4d09-9ee9-d42ddbe70ff2/volumes" Sep 29 14:38:16 crc kubenswrapper[4869]: I0929 14:38:16.403857 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"07fe876e-5a7e-48ca-b91b-44c5fc9129b2","Type":"ContainerStarted","Data":"43aba79b67228d07aa91ec466d7ca61d17d32c1ce83079fde899f14e4a4dc4e6"} Sep 29 14:38:16 crc kubenswrapper[4869]: I0929 14:38:16.640778 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Sep 29 14:38:17 crc kubenswrapper[4869]: I0929 14:38:17.396491 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Sep 29 14:38:17 crc kubenswrapper[4869]: I0929 14:38:17.455031 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d6930ce7-8080-4396-8b97-92edef318edf","Type":"ContainerStarted","Data":"c084fa0f8aa386500a05502bf6eb76abb2f8e15adbc294947b76b8e89d8ce539"} Sep 29 14:38:17 crc kubenswrapper[4869]: I0929 14:38:17.455478 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Sep 29 14:38:17 crc kubenswrapper[4869]: I0929 14:38:17.914834 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume2-0" Sep 29 14:38:18 crc 
kubenswrapper[4869]: I0929 14:38:18.473945 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"07fe876e-5a7e-48ca-b91b-44c5fc9129b2","Type":"ContainerStarted","Data":"09b45769ebc8d2772e517e652bc5e24193bf38223801f6449e0d7a4d7d87fb03"} Sep 29 14:38:18 crc kubenswrapper[4869]: I0929 14:38:18.474404 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"07fe876e-5a7e-48ca-b91b-44c5fc9129b2","Type":"ContainerStarted","Data":"d3699226f94a8c8d62c395e754daa231318d6665892ff8379b8a11a5f85ce119"} Sep 29 14:38:18 crc kubenswrapper[4869]: I0929 14:38:18.479107 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d6930ce7-8080-4396-8b97-92edef318edf","Type":"ContainerStarted","Data":"fb02eeb5e76bc6116602aa7965d1e14ac3226a11b95b901d56292fccab3121c7"} Sep 29 14:38:19 crc kubenswrapper[4869]: I0929 14:38:19.534950 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.534921647 podStartE2EDuration="5.534921647s" podCreationTimestamp="2025-09-29 14:38:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:38:19.524400972 +0000 UTC m=+3425.965045292" watchObservedRunningTime="2025-09-29 14:38:19.534921647 +0000 UTC m=+3425.975565957" Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.568262 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d6930ce7-8080-4396-8b97-92edef318edf","Type":"ContainerStarted","Data":"170d0cdea23b70c075b7a47c73e2860e268c782d97d5a3e98a0058848389bda2"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.570987 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerStarted","Data":"ce41cd63a4f84a56cd095e8feb2b4b2a4cf2bf99c1d06d9859e70db5d9071955"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.571047 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerStarted","Data":"ef2209aab7ca99aa8caadde1c14f835d8d4dd1a4b71b4d204d2d607b012e792a"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.571090 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bb49bd5bf-n7hcd" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon-log" containerID="cri-o://ef2209aab7ca99aa8caadde1c14f835d8d4dd1a4b71b4d204d2d607b012e792a" gracePeriod=30 Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.571154 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bb49bd5bf-n7hcd" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon" containerID="cri-o://ce41cd63a4f84a56cd095e8feb2b4b2a4cf2bf99c1d06d9859e70db5d9071955" gracePeriod=30 Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.574525 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerStarted","Data":"f82ee98b30289ea2bb98e52ff1faa8537c552599708a4a1f59034bca5fb5ceff"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.574571 4869 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerStarted","Data":"bfbb3e1d71cfcb75961723547f1e4b17f0b3b7f93d6d1f86f79dbbdddce0687a"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.574595 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-585944bbfc-88glg" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon-log" containerID="cri-o://bfbb3e1d71cfcb75961723547f1e4b17f0b3b7f93d6d1f86f79dbbdddce0687a" gracePeriod=30 Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.574682 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-585944bbfc-88glg" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon" containerID="cri-o://f82ee98b30289ea2bb98e52ff1faa8537c552599708a4a1f59034bca5fb5ceff" gracePeriod=30 Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.578949 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerStarted","Data":"a1a02ccd33d7d914cfd0dcd8969ab58ccd4fe3056a98cc556b4c27fbf6ced241"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.578994 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerStarted","Data":"5f405495892b82c03162b657e9c32db8f7995437f67a1fc5d95d957a98c45b98"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.583262 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74f556cb8d-4pwqg" event={"ID":"696c6bef-f1c0-4d67-9ca8-ccb6bb489141","Type":"ContainerStarted","Data":"f15f96e64909ee1a23f1be05c8db49e704f772bbe584f9e922ea56de0dd6052c"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.583303 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74f556cb8d-4pwqg" event={"ID":"696c6bef-f1c0-4d67-9ca8-ccb6bb489141","Type":"ContainerStarted","Data":"d7db1fcd3d2e0d824a7e7c15984c1916505344e3c91219417b890aa601ba56c0"} Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.629236 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.629213476 podStartE2EDuration="9.629213476s" podCreationTimestamp="2025-09-29 14:38:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:38:24.601224324 +0000 UTC m=+3431.041868664" watchObservedRunningTime="2025-09-29 14:38:24.629213476 +0000 UTC m=+3431.069857796" Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.631896 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-74f556cb8d-4pwqg" podStartSLOduration=3.031295887 podStartE2EDuration="14.631885955s" podCreationTimestamp="2025-09-29 14:38:10 +0000 UTC" firstStartedPulling="2025-09-29 14:38:12.222685703 +0000 UTC m=+3418.663330023" lastFinishedPulling="2025-09-29 14:38:23.823275771 +0000 UTC m=+3430.263920091" observedRunningTime="2025-09-29 14:38:24.621522754 +0000 UTC m=+3431.062167074" watchObservedRunningTime="2025-09-29 14:38:24.631885955 +0000 UTC m=+3431.072530276" Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.655170 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-c5bdb566d-s22fb" 
podStartSLOduration=2.796678172 podStartE2EDuration="14.655134273s" podCreationTimestamp="2025-09-29 14:38:10 +0000 UTC" firstStartedPulling="2025-09-29 14:38:11.985922112 +0000 UTC m=+3418.426566422" lastFinishedPulling="2025-09-29 14:38:23.844378203 +0000 UTC m=+3430.285022523" observedRunningTime="2025-09-29 14:38:24.641651841 +0000 UTC m=+3431.082296191" watchObservedRunningTime="2025-09-29 14:38:24.655134273 +0000 UTC m=+3431.095778593" Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.673754 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7bb49bd5bf-n7hcd" podStartSLOduration=2.916691336 podStartE2EDuration="17.673498804s" podCreationTimestamp="2025-09-29 14:38:07 +0000 UTC" firstStartedPulling="2025-09-29 14:38:09.108120052 +0000 UTC m=+3415.548764372" lastFinishedPulling="2025-09-29 14:38:23.86492752 +0000 UTC m=+3430.305571840" observedRunningTime="2025-09-29 14:38:24.669240112 +0000 UTC m=+3431.109884432" watchObservedRunningTime="2025-09-29 14:38:24.673498804 +0000 UTC m=+3431.114143124" Sep 29 14:38:24 crc kubenswrapper[4869]: I0929 14:38:24.709789 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-585944bbfc-88glg" podStartSLOduration=3.153890688 podStartE2EDuration="17.709748642s" podCreationTimestamp="2025-09-29 14:38:07 +0000 UTC" firstStartedPulling="2025-09-29 14:38:09.253991726 +0000 UTC m=+3415.694636036" lastFinishedPulling="2025-09-29 14:38:23.80984967 +0000 UTC m=+3430.250493990" observedRunningTime="2025-09-29 14:38:24.699278048 +0000 UTC m=+3431.139922388" watchObservedRunningTime="2025-09-29 14:38:24.709748642 +0000 UTC m=+3431.150392972" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.145644 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.146090 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.197223 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.199983 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.595225 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.595288 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.882207 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.882370 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.930347 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Sep 29 14:38:25 crc kubenswrapper[4869]: I0929 14:38:25.949481 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Sep 29 14:38:26 crc 
kubenswrapper[4869]: I0929 14:38:26.606636 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Sep 29 14:38:26 crc kubenswrapper[4869]: I0929 14:38:26.607170 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Sep 29 14:38:27 crc kubenswrapper[4869]: I0929 14:38:27.907510 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:28 crc kubenswrapper[4869]: I0929 14:38:28.015429 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:30 crc kubenswrapper[4869]: I0929 14:38:30.264886 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:30 crc kubenswrapper[4869]: I0929 14:38:30.265259 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Sep 29 14:38:30 crc kubenswrapper[4869]: I0929 14:38:30.276275 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Sep 29 14:38:31 crc kubenswrapper[4869]: I0929 14:38:31.173454 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:31 crc kubenswrapper[4869]: I0929 14:38:31.173517 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:31 crc kubenswrapper[4869]: I0929 14:38:31.283048 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:31 crc kubenswrapper[4869]: I0929 14:38:31.283459 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:32 crc kubenswrapper[4869]: I0929 14:38:32.381271 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Sep 29 14:38:43 crc kubenswrapper[4869]: I0929 14:38:43.365743 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:43 crc kubenswrapper[4869]: I0929 14:38:43.471768 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:45 crc kubenswrapper[4869]: I0929 14:38:45.392173 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:38:45 crc kubenswrapper[4869]: I0929 14:38:45.455437 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-74f556cb8d-4pwqg" Sep 29 14:38:45 crc kubenswrapper[4869]: I0929 14:38:45.541732 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:38:45 crc kubenswrapper[4869]: I0929 14:38:45.824306 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c5bdb566d-s22fb" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon-log" containerID="cri-o://5f405495892b82c03162b657e9c32db8f7995437f67a1fc5d95d957a98c45b98" gracePeriod=30 Sep 29 14:38:45 crc kubenswrapper[4869]: I0929 14:38:45.824423 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c5bdb566d-s22fb" 
podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" containerID="cri-o://a1a02ccd33d7d914cfd0dcd8969ab58ccd4fe3056a98cc556b4c27fbf6ced241" gracePeriod=30 Sep 29 14:38:47 crc kubenswrapper[4869]: I0929 14:38:47.851796 4869 generic.go:334] "Generic (PLEG): container finished" podID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerID="a1a02ccd33d7d914cfd0dcd8969ab58ccd4fe3056a98cc556b4c27fbf6ced241" exitCode=0 Sep 29 14:38:47 crc kubenswrapper[4869]: I0929 14:38:47.851903 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerDied","Data":"a1a02ccd33d7d914cfd0dcd8969ab58ccd4fe3056a98cc556b4c27fbf6ced241"} Sep 29 14:38:50 crc kubenswrapper[4869]: I0929 14:38:50.660525 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:38:50 crc kubenswrapper[4869]: I0929 14:38:50.661807 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:38:51 crc kubenswrapper[4869]: I0929 14:38:51.173857 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-c5bdb566d-s22fb" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.9:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.9:8443: connect: connection refused" Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.924844 4869 generic.go:334] "Generic (PLEG): container finished" podID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerID="f82ee98b30289ea2bb98e52ff1faa8537c552599708a4a1f59034bca5fb5ceff" exitCode=137 Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.925545 4869 generic.go:334] "Generic (PLEG): container finished" podID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerID="bfbb3e1d71cfcb75961723547f1e4b17f0b3b7f93d6d1f86f79dbbdddce0687a" exitCode=137 Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.925011 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerDied","Data":"f82ee98b30289ea2bb98e52ff1faa8537c552599708a4a1f59034bca5fb5ceff"} Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.925660 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerDied","Data":"bfbb3e1d71cfcb75961723547f1e4b17f0b3b7f93d6d1f86f79dbbdddce0687a"} Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.928785 4869 generic.go:334] "Generic (PLEG): container finished" podID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerID="ce41cd63a4f84a56cd095e8feb2b4b2a4cf2bf99c1d06d9859e70db5d9071955" exitCode=137 Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.928829 4869 generic.go:334] "Generic (PLEG): container finished" podID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerID="ef2209aab7ca99aa8caadde1c14f835d8d4dd1a4b71b4d204d2d607b012e792a" exitCode=137 Sep 29 14:38:54 crc 
kubenswrapper[4869]: I0929 14:38:54.928856 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerDied","Data":"ce41cd63a4f84a56cd095e8feb2b4b2a4cf2bf99c1d06d9859e70db5d9071955"} Sep 29 14:38:54 crc kubenswrapper[4869]: I0929 14:38:54.928894 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerDied","Data":"ef2209aab7ca99aa8caadde1c14f835d8d4dd1a4b71b4d204d2d607b012e792a"} Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.333734 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.338396 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507089 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key\") pod \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507316 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data\") pod \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507347 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts\") pod \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507435 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts\") pod \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507504 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47kjt\" (UniqueName: \"kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt\") pod \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507537 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs\") pod \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507646 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data\") pod \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507726 4869 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs\") pod \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507788 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key\") pod \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\" (UID: \"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.507826 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs\") pod \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\" (UID: \"abd5c89e-6590-4494-a8fd-6a2d8e1adce8\") " Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.509128 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs" (OuterVolumeSpecName: "logs") pod "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" (UID: "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.509900 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs" (OuterVolumeSpecName: "logs") pod "abd5c89e-6590-4494-a8fd-6a2d8e1adce8" (UID: "abd5c89e-6590-4494-a8fd-6a2d8e1adce8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.511530 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.517547 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" (UID: "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.517581 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt" (OuterVolumeSpecName: "kube-api-access-47kjt") pod "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" (UID: "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8"). InnerVolumeSpecName "kube-api-access-47kjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.522544 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "abd5c89e-6590-4494-a8fd-6a2d8e1adce8" (UID: "abd5c89e-6590-4494-a8fd-6a2d8e1adce8"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.526562 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs" (OuterVolumeSpecName: "kube-api-access-rp6cs") pod "abd5c89e-6590-4494-a8fd-6a2d8e1adce8" (UID: "abd5c89e-6590-4494-a8fd-6a2d8e1adce8"). InnerVolumeSpecName "kube-api-access-rp6cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.549357 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data" (OuterVolumeSpecName: "config-data") pod "abd5c89e-6590-4494-a8fd-6a2d8e1adce8" (UID: "abd5c89e-6590-4494-a8fd-6a2d8e1adce8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.560123 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts" (OuterVolumeSpecName: "scripts") pod "abd5c89e-6590-4494-a8fd-6a2d8e1adce8" (UID: "abd5c89e-6590-4494-a8fd-6a2d8e1adce8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.562393 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts" (OuterVolumeSpecName: "scripts") pod "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" (UID: "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.564171 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data" (OuterVolumeSpecName: "config-data") pod "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" (UID: "7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613651 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613697 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613710 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47kjt\" (UniqueName: \"kubernetes.io/projected/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-kube-api-access-47kjt\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613723 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-kube-api-access-rp6cs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613735 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613744 4869 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613753 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613762 4869 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.613770 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abd5c89e-6590-4494-a8fd-6a2d8e1adce8-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.956693 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-585944bbfc-88glg" event={"ID":"abd5c89e-6590-4494-a8fd-6a2d8e1adce8","Type":"ContainerDied","Data":"c093237e5704a4264859ecfd5a1fc02e97da19e8f2d995efbded9185b2eab7d9"} Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.957313 4869 scope.go:117] "RemoveContainer" containerID="f82ee98b30289ea2bb98e52ff1faa8537c552599708a4a1f59034bca5fb5ceff" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.956769 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-585944bbfc-88glg" Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.966404 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb49bd5bf-n7hcd" event={"ID":"7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8","Type":"ContainerDied","Data":"334742d7afee70be7856fe2f0f1a73c8408a8d0a7d3ae842f436ae7d2cb5c32f"} Sep 29 14:38:55 crc kubenswrapper[4869]: I0929 14:38:55.966530 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7bb49bd5bf-n7hcd" Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.014966 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.028803 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7bb49bd5bf-n7hcd"] Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.038971 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.049477 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-585944bbfc-88glg"] Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.175266 4869 scope.go:117] "RemoveContainer" containerID="bfbb3e1d71cfcb75961723547f1e4b17f0b3b7f93d6d1f86f79dbbdddce0687a" Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.203939 4869 scope.go:117] "RemoveContainer" containerID="ce41cd63a4f84a56cd095e8feb2b4b2a4cf2bf99c1d06d9859e70db5d9071955" Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.256916 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" path="/var/lib/kubelet/pods/7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8/volumes" Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.257690 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" path="/var/lib/kubelet/pods/abd5c89e-6590-4494-a8fd-6a2d8e1adce8/volumes" Sep 29 14:38:56 crc kubenswrapper[4869]: I0929 14:38:56.403389 4869 scope.go:117] "RemoveContainer" containerID="ef2209aab7ca99aa8caadde1c14f835d8d4dd1a4b71b4d204d2d607b012e792a" Sep 29 14:39:01 crc kubenswrapper[4869]: I0929 14:39:01.173774 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-c5bdb566d-s22fb" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.9:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.9:8443: connect: connection refused" Sep 29 14:39:11 crc kubenswrapper[4869]: I0929 14:39:11.174396 4869 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-c5bdb566d-s22fb" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.9:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.9:8443: connect: connection refused" Sep 29 14:39:11 crc kubenswrapper[4869]: I0929 14:39:11.175371 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.253669 4869 generic.go:334] "Generic (PLEG): container finished" podID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerID="5f405495892b82c03162b657e9c32db8f7995437f67a1fc5d95d957a98c45b98" exitCode=137 Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.264583 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerDied","Data":"5f405495892b82c03162b657e9c32db8f7995437f67a1fc5d95d957a98c45b98"} Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.264661 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c5bdb566d-s22fb" 
event={"ID":"3e3518b4-923d-4aea-abb6-f0acc24b11d6","Type":"ContainerDied","Data":"36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655"} Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.264679 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36f0f293f36362a505ba2df9802609acf9c6e9f78cb7493ff0e68e10beaab655" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.269112 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376459 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376528 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376570 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376679 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376817 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.376942 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.377020 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69rlg\" (UniqueName: \"kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg\") pod \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\" (UID: \"3e3518b4-923d-4aea-abb6-f0acc24b11d6\") " Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.377500 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs" (OuterVolumeSpecName: "logs") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.377694 4869 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e3518b4-923d-4aea-abb6-f0acc24b11d6-logs\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.385492 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.385833 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg" (OuterVolumeSpecName: "kube-api-access-69rlg") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "kube-api-access-69rlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.407983 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts" (OuterVolumeSpecName: "scripts") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.409151 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data" (OuterVolumeSpecName: "config-data") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.414183 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.448284 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "3e3518b4-923d-4aea-abb6-f0acc24b11d6" (UID: "3e3518b4-923d-4aea-abb6-f0acc24b11d6"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.480468 4869 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-scripts\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.480818 4869 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.480905 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69rlg\" (UniqueName: \"kubernetes.io/projected/3e3518b4-923d-4aea-abb6-f0acc24b11d6-kube-api-access-69rlg\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.480977 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3e3518b4-923d-4aea-abb6-f0acc24b11d6-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.481087 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:16 crc kubenswrapper[4869]: I0929 14:39:16.481157 4869 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e3518b4-923d-4aea-abb6-f0acc24b11d6-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:17 crc kubenswrapper[4869]: I0929 14:39:17.265697 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c5bdb566d-s22fb" Sep 29 14:39:17 crc kubenswrapper[4869]: I0929 14:39:17.327968 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:39:17 crc kubenswrapper[4869]: I0929 14:39:17.342210 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-c5bdb566d-s22fb"] Sep 29 14:39:18 crc kubenswrapper[4869]: I0929 14:39:18.257519 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" path="/var/lib/kubelet/pods/3e3518b4-923d-4aea-abb6-f0acc24b11d6/volumes" Sep 29 14:39:19 crc kubenswrapper[4869]: I0929 14:39:19.657152 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:19 crc kubenswrapper[4869]: I0929 14:39:19.657777 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="prometheus" containerID="cri-o://2f81a502be902eb7059d41f360b8ca58b8ccbb8f3b5a58c0634ea00280c2d49c" gracePeriod=600 Sep 29 14:39:19 crc kubenswrapper[4869]: I0929 14:39:19.657957 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="thanos-sidecar" containerID="cri-o://bb2716c367616610ddbc81f82b6cd15b2c425a50813b1ce26ab4abba72b43649" gracePeriod=600 Sep 29 14:39:19 crc kubenswrapper[4869]: I0929 14:39:19.658000 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="config-reloader" containerID="cri-o://4d42727421dde2c640e05125b24fa54f80f223c8ffffde6a4bd0648f8f03f89a" gracePeriod=600 Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303193 4869 generic.go:334] "Generic (PLEG): container finished" podID="52556bdb-2237-4c52-980f-9c3fd051804e" containerID="bb2716c367616610ddbc81f82b6cd15b2c425a50813b1ce26ab4abba72b43649" exitCode=0 Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303645 4869 generic.go:334] "Generic (PLEG): container finished" podID="52556bdb-2237-4c52-980f-9c3fd051804e" containerID="4d42727421dde2c640e05125b24fa54f80f223c8ffffde6a4bd0648f8f03f89a" exitCode=0 Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303659 4869 generic.go:334] "Generic (PLEG): container finished" podID="52556bdb-2237-4c52-980f-9c3fd051804e" containerID="2f81a502be902eb7059d41f360b8ca58b8ccbb8f3b5a58c0634ea00280c2d49c" exitCode=0 Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303364 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerDied","Data":"bb2716c367616610ddbc81f82b6cd15b2c425a50813b1ce26ab4abba72b43649"} Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303714 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerDied","Data":"4d42727421dde2c640e05125b24fa54f80f223c8ffffde6a4bd0648f8f03f89a"} Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.303736 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerDied","Data":"2f81a502be902eb7059d41f360b8ca58b8ccbb8f3b5a58c0634ea00280c2d49c"} Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.657268 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.657365 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.691467 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794265 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794340 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794413 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bxhw\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794465 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794631 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794769 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794943 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.794991 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.795185 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.795240 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.795307 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"52556bdb-2237-4c52-980f-9c3fd051804e\" (UID: \"52556bdb-2237-4c52-980f-9c3fd051804e\") " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.797234 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.798441 4869 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/52556bdb-2237-4c52-980f-9c3fd051804e-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.807037 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.807156 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw" (OuterVolumeSpecName: "kube-api-access-2bxhw") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "kube-api-access-2bxhw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.807388 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out" (OuterVolumeSpecName: "config-out") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.809118 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.816257 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.823146 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config" (OuterVolumeSpecName: "config") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.827789 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.848945 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.854962 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901473 4869 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901514 4869 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-tls-assets\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901528 4869 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901542 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bxhw\" (UniqueName: \"kubernetes.io/projected/52556bdb-2237-4c52-980f-9c3fd051804e-kube-api-access-2bxhw\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901554 4869 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901566 4869 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901707 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") on node \"crc\" " Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901727 4869 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/52556bdb-2237-4c52-980f-9c3fd051804e-config-out\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901741 4869 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.901928 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config" (OuterVolumeSpecName: "web-config") pod "52556bdb-2237-4c52-980f-9c3fd051804e" (UID: "52556bdb-2237-4c52-980f-9c3fd051804e"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.932730 4869 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Sep 29 14:39:20 crc kubenswrapper[4869]: I0929 14:39:20.932974 4869 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9") on node "crc" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.003629 4869 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/52556bdb-2237-4c52-980f-9c3fd051804e-web-config\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.003676 4869 reconciler_common.go:293] "Volume detached for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") on node \"crc\" DevicePath \"\"" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.320346 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"52556bdb-2237-4c52-980f-9c3fd051804e","Type":"ContainerDied","Data":"07718be136405e5ef8abe6039b492c979114dc3bc8971ff1d3c183dcad9e09d9"} Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.320873 4869 scope.go:117] "RemoveContainer" containerID="bb2716c367616610ddbc81f82b6cd15b2c425a50813b1ce26ab4abba72b43649" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.320931 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.354059 4869 scope.go:117] "RemoveContainer" containerID="4d42727421dde2c640e05125b24fa54f80f223c8ffffde6a4bd0648f8f03f89a" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.370552 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.388719 4869 scope.go:117] "RemoveContainer" containerID="2f81a502be902eb7059d41f360b8ca58b8ccbb8f3b5a58c0634ea00280c2d49c" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.393740 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.414861 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415504 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="thanos-sidecar" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415530 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="thanos-sidecar" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415546 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="init-config-reloader" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415556 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="init-config-reloader" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415583 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415594 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" 
containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415629 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415639 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415654 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="config-reloader" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415662 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="config-reloader" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415675 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415684 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415714 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415722 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415742 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415749 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415766 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="prometheus" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415774 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="prometheus" Sep 29 14:39:21 crc kubenswrapper[4869]: E0929 14:39:21.415797 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.415804 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416044 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="config-reloader" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416063 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416079 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416092 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="thanos-sidecar" Sep 29 14:39:21 
crc kubenswrapper[4869]: I0929 14:39:21.416103 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416114 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="abd5c89e-6590-4494-a8fd-6a2d8e1adce8" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416130 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e3518b4-923d-4aea-abb6-f0acc24b11d6" containerName="horizon" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416140 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" containerName="prometheus" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.416147 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a9e1fcb-82aa-449e-9dd9-f9a7c811feb8" containerName="horizon-log" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.418820 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.420855 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-wtzth" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.421070 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.421200 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.421437 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.421759 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.421958 4869 scope.go:117] "RemoveContainer" containerID="3f2782b835abbcf7e039732da011606c2d99078cf4074344b37e4de565416777" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.434110 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.435053 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.518360 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.518458 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 
14:39:21.518758 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z994h\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-kube-api-access-z994h\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.518908 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519017 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519286 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519379 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519487 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519622 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519736 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.519773 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622137 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622208 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z994h\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-kube-api-access-z994h\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622244 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622290 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622327 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622358 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622384 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622420 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config-out\") pod 
\"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622475 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622495 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.622550 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.624168 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.629878 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.630159 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.630592 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.631030 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.634392 4869 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not 
set. Skipping MountDevice... Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.634666 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.634538 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/279150bdb70b2f663e8288477cfabe3c1abac14428cf8cff87f747a1669c049c/globalmount\"" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.653743 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.654347 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.655911 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-config\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.658520 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z994h\" (UniqueName: \"kubernetes.io/projected/26e88dc5-69ae-4a63-b9ff-b81d4bc78079-kube-api-access-z994h\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.707930 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2aaf80bb-a587-4d5f-b4f9-159393870ae9\") pod \"prometheus-metric-storage-0\" (UID: \"26e88dc5-69ae-4a63-b9ff-b81d4bc78079\") " pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:21 crc kubenswrapper[4869]: I0929 14:39:21.807501 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:22 crc kubenswrapper[4869]: I0929 14:39:22.296695 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52556bdb-2237-4c52-980f-9c3fd051804e" path="/var/lib/kubelet/pods/52556bdb-2237-4c52-980f-9c3fd051804e/volumes" Sep 29 14:39:22 crc kubenswrapper[4869]: I0929 14:39:22.430150 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Sep 29 14:39:23 crc kubenswrapper[4869]: I0929 14:39:23.368549 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerStarted","Data":"9a3af8f657a473d02c1bf914895a7611acc8b0cbbed2102facdadd9cd633927e"} Sep 29 14:39:27 crc kubenswrapper[4869]: I0929 14:39:27.418266 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerStarted","Data":"800e3d3a73e13cbd77328bf4cb3b6d57ab4aa3d9c910965c63fb0c2802366e8e"} Sep 29 14:39:36 crc kubenswrapper[4869]: I0929 14:39:36.521419 4869 generic.go:334] "Generic (PLEG): container finished" podID="26e88dc5-69ae-4a63-b9ff-b81d4bc78079" containerID="800e3d3a73e13cbd77328bf4cb3b6d57ab4aa3d9c910965c63fb0c2802366e8e" exitCode=0 Sep 29 14:39:36 crc kubenswrapper[4869]: I0929 14:39:36.521511 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerDied","Data":"800e3d3a73e13cbd77328bf4cb3b6d57ab4aa3d9c910965c63fb0c2802366e8e"} Sep 29 14:39:37 crc kubenswrapper[4869]: I0929 14:39:37.548345 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerStarted","Data":"5aa220061d578b5c891b0a1d9ab1a51cae51c487cdd8048e662b70cc8b6be98f"} Sep 29 14:39:41 crc kubenswrapper[4869]: I0929 14:39:41.594942 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerStarted","Data":"2574c2bca9564adcbded572dc75f6df494be85c46aae99a8daca133b082ea92d"} Sep 29 14:39:41 crc kubenswrapper[4869]: I0929 14:39:41.595662 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"26e88dc5-69ae-4a63-b9ff-b81d4bc78079","Type":"ContainerStarted","Data":"2fd5b25acc74264d6911d54aac22a8d63c9327493bed25a8dbdc2ed8a5e2636d"} Sep 29 14:39:41 crc kubenswrapper[4869]: I0929 14:39:41.626837 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=20.626806609 podStartE2EDuration="20.626806609s" podCreationTimestamp="2025-09-29 14:39:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:39:41.621556461 +0000 UTC m=+3508.062200801" watchObservedRunningTime="2025-09-29 14:39:41.626806609 +0000 UTC m=+3508.067450969" Sep 29 14:39:41 crc kubenswrapper[4869]: I0929 14:39:41.807890 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:50 crc kubenswrapper[4869]: I0929 14:39:50.657266 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:39:50 crc kubenswrapper[4869]: I0929 14:39:50.658141 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:39:50 crc kubenswrapper[4869]: I0929 14:39:50.658225 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:39:50 crc kubenswrapper[4869]: I0929 14:39:50.659587 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:39:50 crc kubenswrapper[4869]: I0929 14:39:50.659728 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568" gracePeriod=600 Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.727927 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568" exitCode=0 Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.727997 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568"} Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.728709 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"} Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.728739 4869 scope.go:117] "RemoveContainer" containerID="a33ac647fa14bdd1b9c931afd739d61cf5e7693ed6574cd189ff429edd6ad31e" Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.808060 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:51 crc kubenswrapper[4869]: I0929 14:39:51.816404 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Sep 29 14:39:52 crc kubenswrapper[4869]: I0929 14:39:52.749922 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.095906 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.099631 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.115165 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.243243 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkzxg\" (UniqueName: \"kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.244038 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.244236 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.347382 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.347637 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.347724 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkzxg\" (UniqueName: \"kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.347991 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.348337 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.376393 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mkzxg\" (UniqueName: \"kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg\") pod \"redhat-operators-5jqrc\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:08 crc kubenswrapper[4869]: I0929 14:40:08.445063 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:09 crc kubenswrapper[4869]: I0929 14:40:09.010015 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:09 crc kubenswrapper[4869]: I0929 14:40:09.943499 4869 generic.go:334] "Generic (PLEG): container finished" podID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerID="a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74" exitCode=0 Sep 29 14:40:09 crc kubenswrapper[4869]: I0929 14:40:09.944150 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerDied","Data":"a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74"} Sep 29 14:40:09 crc kubenswrapper[4869]: I0929 14:40:09.944187 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerStarted","Data":"81cf8b17397db1b9576fa1b26bdfc428401aafb8a759c720c9944cb196e309e3"} Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.795108 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.799250 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.802445 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.802733 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-hsqq2" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.803005 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.809364 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.829875 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.846771 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.846882 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.846922 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.846994 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjb2p\" (UniqueName: \"kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.847057 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.847107 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.847137 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.847210 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.847267 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.948916 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949034 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949082 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949177 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949231 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949287 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949364 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config\") pod 
\"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949408 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.949502 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjb2p\" (UniqueName: \"kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.951557 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.951600 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.952105 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.954933 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.957244 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.957702 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.969364 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerStarted","Data":"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5"} Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 
14:40:11.969464 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.973014 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjb2p\" (UniqueName: \"kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:11 crc kubenswrapper[4869]: I0929 14:40:11.984365 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/tempest-tests-tempest" Sep 29 14:40:12 crc kubenswrapper[4869]: I0929 14:40:12.021879 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") " pod="openstack/tempest-tests-tempest" Sep 29 14:40:12 crc kubenswrapper[4869]: I0929 14:40:12.134567 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Sep 29 14:40:12 crc kubenswrapper[4869]: I0929 14:40:12.605677 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Sep 29 14:40:12 crc kubenswrapper[4869]: I0929 14:40:12.984355 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"3b562fc4-b928-4883-8bd5-2db40da004d0","Type":"ContainerStarted","Data":"a42d6927b6e9f776fce725ee7d7c728f797c27b2938a6311d6b735150962ec92"} Sep 29 14:40:14 crc kubenswrapper[4869]: I0929 14:40:14.002836 4869 generic.go:334] "Generic (PLEG): container finished" podID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerID="4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5" exitCode=0 Sep 29 14:40:14 crc kubenswrapper[4869]: I0929 14:40:14.002943 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerDied","Data":"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5"} Sep 29 14:40:23 crc kubenswrapper[4869]: I0929 14:40:23.067254 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Sep 29 14:40:24 crc kubenswrapper[4869]: I0929 14:40:24.127201 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerStarted","Data":"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d"} Sep 29 14:40:24 crc kubenswrapper[4869]: I0929 14:40:24.131630 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"3b562fc4-b928-4883-8bd5-2db40da004d0","Type":"ContainerStarted","Data":"41cb5366a81367e1f0864db0f1488e81d36d43df650c7c5c97b20434c3ff28ed"} Sep 29 14:40:24 crc kubenswrapper[4869]: I0929 14:40:24.158231 4869 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5jqrc" podStartSLOduration=3.068026763 podStartE2EDuration="16.158207542s" podCreationTimestamp="2025-09-29 14:40:08 +0000 UTC" firstStartedPulling="2025-09-29 14:40:09.952233967 +0000 UTC m=+3536.392878287" lastFinishedPulling="2025-09-29 14:40:23.042414746 +0000 UTC m=+3549.483059066" observedRunningTime="2025-09-29 14:40:24.155119072 +0000 UTC m=+3550.595763382" watchObservedRunningTime="2025-09-29 14:40:24.158207542 +0000 UTC m=+3550.598851852" Sep 29 14:40:24 crc kubenswrapper[4869]: I0929 14:40:24.181006 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.721499698 podStartE2EDuration="14.180983108s" podCreationTimestamp="2025-09-29 14:40:10 +0000 UTC" firstStartedPulling="2025-09-29 14:40:12.603895394 +0000 UTC m=+3539.044539724" lastFinishedPulling="2025-09-29 14:40:23.063378814 +0000 UTC m=+3549.504023134" observedRunningTime="2025-09-29 14:40:24.175487624 +0000 UTC m=+3550.616131954" watchObservedRunningTime="2025-09-29 14:40:24.180983108 +0000 UTC m=+3550.621627418" Sep 29 14:40:28 crc kubenswrapper[4869]: I0929 14:40:28.445700 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:28 crc kubenswrapper[4869]: I0929 14:40:28.446482 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:29 crc kubenswrapper[4869]: I0929 14:40:29.504036 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5jqrc" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" probeResult="failure" output=< Sep 29 14:40:29 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 14:40:29 crc kubenswrapper[4869]: > Sep 29 14:40:39 crc kubenswrapper[4869]: I0929 14:40:39.509023 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5jqrc" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" probeResult="failure" output=< Sep 29 14:40:39 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 14:40:39 crc kubenswrapper[4869]: > Sep 29 14:40:48 crc kubenswrapper[4869]: I0929 14:40:48.522890 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:48 crc kubenswrapper[4869]: I0929 14:40:48.593452 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:48 crc kubenswrapper[4869]: I0929 14:40:48.818660 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:50 crc kubenswrapper[4869]: I0929 14:40:50.444772 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5jqrc" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" containerID="cri-o://32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d" gracePeriod=2 Sep 29 14:40:50 crc kubenswrapper[4869]: I0929 14:40:50.993731 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.072336 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities\") pod \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.072843 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkzxg\" (UniqueName: \"kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg\") pod \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.072981 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content\") pod \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\" (UID: \"0eaf8cc9-7121-426c-ae84-ce53d3448f1b\") " Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.073499 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities" (OuterVolumeSpecName: "utilities") pod "0eaf8cc9-7121-426c-ae84-ce53d3448f1b" (UID: "0eaf8cc9-7121-426c-ae84-ce53d3448f1b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.073933 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.081157 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg" (OuterVolumeSpecName: "kube-api-access-mkzxg") pod "0eaf8cc9-7121-426c-ae84-ce53d3448f1b" (UID: "0eaf8cc9-7121-426c-ae84-ce53d3448f1b"). InnerVolumeSpecName "kube-api-access-mkzxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.170194 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0eaf8cc9-7121-426c-ae84-ce53d3448f1b" (UID: "0eaf8cc9-7121-426c-ae84-ce53d3448f1b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.176161 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkzxg\" (UniqueName: \"kubernetes.io/projected/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-kube-api-access-mkzxg\") on node \"crc\" DevicePath \"\"" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.176192 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eaf8cc9-7121-426c-ae84-ce53d3448f1b-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.459788 4869 generic.go:334] "Generic (PLEG): container finished" podID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerID="32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d" exitCode=0 Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.459878 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jqrc" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.459927 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerDied","Data":"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d"} Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.462258 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jqrc" event={"ID":"0eaf8cc9-7121-426c-ae84-ce53d3448f1b","Type":"ContainerDied","Data":"81cf8b17397db1b9576fa1b26bdfc428401aafb8a759c720c9944cb196e309e3"} Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.462404 4869 scope.go:117] "RemoveContainer" containerID="32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.487117 4869 scope.go:117] "RemoveContainer" containerID="4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.509396 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.515589 4869 scope.go:117] "RemoveContainer" containerID="a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.524084 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5jqrc"] Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.570563 4869 scope.go:117] "RemoveContainer" containerID="32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d" Sep 29 14:40:51 crc kubenswrapper[4869]: E0929 14:40:51.571336 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d\": container with ID starting with 32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d not found: ID does not exist" containerID="32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.571403 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d"} err="failed to get container status \"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d\": 
rpc error: code = NotFound desc = could not find container \"32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d\": container with ID starting with 32ebffa3335583b344bed8dc234fbe1a691e62c69f86aa87a3a11cc59c988e9d not found: ID does not exist" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.571449 4869 scope.go:117] "RemoveContainer" containerID="4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5" Sep 29 14:40:51 crc kubenswrapper[4869]: E0929 14:40:51.572025 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5\": container with ID starting with 4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5 not found: ID does not exist" containerID="4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.572053 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5"} err="failed to get container status \"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5\": rpc error: code = NotFound desc = could not find container \"4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5\": container with ID starting with 4639810083e3bcb63fa45a81939dbc15d7247688bb5b8050e7ed2a47618658f5 not found: ID does not exist" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.572073 4869 scope.go:117] "RemoveContainer" containerID="a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74" Sep 29 14:40:51 crc kubenswrapper[4869]: E0929 14:40:51.572487 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74\": container with ID starting with a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74 not found: ID does not exist" containerID="a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74" Sep 29 14:40:51 crc kubenswrapper[4869]: I0929 14:40:51.572544 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74"} err="failed to get container status \"a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74\": rpc error: code = NotFound desc = could not find container \"a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74\": container with ID starting with a0394f30a93936a0f2cac46e62c9ad02b2873e1cdbe10388b1e8edd483a15f74 not found: ID does not exist" Sep 29 14:40:52 crc kubenswrapper[4869]: I0929 14:40:52.259252 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" path="/var/lib/kubelet/pods/0eaf8cc9-7121-426c-ae84-ce53d3448f1b/volumes" Sep 29 14:42:20 crc kubenswrapper[4869]: I0929 14:42:20.657299 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:42:20 crc kubenswrapper[4869]: I0929 14:42:20.658157 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" 
podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:42:50 crc kubenswrapper[4869]: I0929 14:42:50.657369 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:42:50 crc kubenswrapper[4869]: I0929 14:42:50.658115 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:43:20 crc kubenswrapper[4869]: I0929 14:43:20.657500 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:43:20 crc kubenswrapper[4869]: I0929 14:43:20.658300 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:43:20 crc kubenswrapper[4869]: I0929 14:43:20.658369 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:43:21 crc kubenswrapper[4869]: I0929 14:43:21.124973 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:43:21 crc kubenswrapper[4869]: I0929 14:43:21.125079 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" gracePeriod=600 Sep 29 14:43:21 crc kubenswrapper[4869]: E0929 14:43:21.250467 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:43:22 crc kubenswrapper[4869]: I0929 14:43:22.136639 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" exitCode=0 Sep 29 14:43:22 crc kubenswrapper[4869]: I0929 14:43:22.136672 4869 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"} Sep 29 14:43:22 crc kubenswrapper[4869]: I0929 14:43:22.137093 4869 scope.go:117] "RemoveContainer" containerID="f298b97a8edb9c60bc2dd60bf4f41dedb5b4755782ceecf01ef58ab01f623568" Sep 29 14:43:22 crc kubenswrapper[4869]: I0929 14:43:22.137899 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" Sep 29 14:43:22 crc kubenswrapper[4869]: E0929 14:43:22.138208 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.677509 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-64xnh"] Sep 29 14:43:35 crc kubenswrapper[4869]: E0929 14:43:35.678965 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="extract-content" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.678988 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="extract-content" Sep 29 14:43:35 crc kubenswrapper[4869]: E0929 14:43:35.679019 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.679027 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" Sep 29 14:43:35 crc kubenswrapper[4869]: E0929 14:43:35.679080 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="extract-utilities" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.679088 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="extract-utilities" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.679361 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eaf8cc9-7121-426c-ae84-ce53d3448f1b" containerName="registry-server" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.681346 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.692059 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64xnh"] Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.747079 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-catalog-content\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.747379 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-utilities\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.747483 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6kdz\" (UniqueName: \"kubernetes.io/projected/bdf41451-b799-4eea-a0fb-2804371471b7-kube-api-access-d6kdz\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.850386 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-utilities\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.850495 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6kdz\" (UniqueName: \"kubernetes.io/projected/bdf41451-b799-4eea-a0fb-2804371471b7-kube-api-access-d6kdz\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.850550 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-catalog-content\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.850979 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-utilities\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:35 crc kubenswrapper[4869]: I0929 14:43:35.851117 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdf41451-b799-4eea-a0fb-2804371471b7-catalog-content\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:36 crc kubenswrapper[4869]: I0929 14:43:36.009075 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d6kdz\" (UniqueName: \"kubernetes.io/projected/bdf41451-b799-4eea-a0fb-2804371471b7-kube-api-access-d6kdz\") pod \"certified-operators-64xnh\" (UID: \"bdf41451-b799-4eea-a0fb-2804371471b7\") " pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:36 crc kubenswrapper[4869]: I0929 14:43:36.309926 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:36 crc kubenswrapper[4869]: I0929 14:43:36.850345 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64xnh"] Sep 29 14:43:37 crc kubenswrapper[4869]: I0929 14:43:37.242802 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" Sep 29 14:43:37 crc kubenswrapper[4869]: E0929 14:43:37.243542 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:43:37 crc kubenswrapper[4869]: I0929 14:43:37.332569 4869 generic.go:334] "Generic (PLEG): container finished" podID="bdf41451-b799-4eea-a0fb-2804371471b7" containerID="a339d839b6d2593697cdd5cbd2c8e87a0c671827a9ae010219278a5dc0fe343f" exitCode=0 Sep 29 14:43:37 crc kubenswrapper[4869]: I0929 14:43:37.332690 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64xnh" event={"ID":"bdf41451-b799-4eea-a0fb-2804371471b7","Type":"ContainerDied","Data":"a339d839b6d2593697cdd5cbd2c8e87a0c671827a9ae010219278a5dc0fe343f"} Sep 29 14:43:37 crc kubenswrapper[4869]: I0929 14:43:37.332721 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64xnh" event={"ID":"bdf41451-b799-4eea-a0fb-2804371471b7","Type":"ContainerStarted","Data":"ac973259730fcf19190b03b35a0a6faac9425165ee391ee9a24260d9e340148c"} Sep 29 14:43:37 crc kubenswrapper[4869]: I0929 14:43:37.341110 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.850439 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-slspb"] Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.853414 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.865213 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-slspb"] Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.914634 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.914814 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:41 crc kubenswrapper[4869]: I0929 14:43:41.914882 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gwql\" (UniqueName: \"kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.017324 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.017436 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gwql\" (UniqueName: \"kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.017567 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.017967 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.018133 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.040806 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5gwql\" (UniqueName: \"kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql\") pod \"community-operators-slspb\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") " pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:42 crc kubenswrapper[4869]: I0929 14:43:42.177166 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-slspb" Sep 29 14:43:43 crc kubenswrapper[4869]: I0929 14:43:43.973046 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-slspb"] Sep 29 14:43:44 crc kubenswrapper[4869]: I0929 14:43:44.423358 4869 generic.go:334] "Generic (PLEG): container finished" podID="bdf41451-b799-4eea-a0fb-2804371471b7" containerID="84ad04d24fa6ccc192c81d7f9680b06d48db28d9d56222b6a3d1b3a0fe30d653" exitCode=0 Sep 29 14:43:44 crc kubenswrapper[4869]: I0929 14:43:44.423430 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64xnh" event={"ID":"bdf41451-b799-4eea-a0fb-2804371471b7","Type":"ContainerDied","Data":"84ad04d24fa6ccc192c81d7f9680b06d48db28d9d56222b6a3d1b3a0fe30d653"} Sep 29 14:43:44 crc kubenswrapper[4869]: I0929 14:43:44.424779 4869 generic.go:334] "Generic (PLEG): container finished" podID="396998c2-841b-4d0e-a291-a61d0994234c" containerID="cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1" exitCode=0 Sep 29 14:43:44 crc kubenswrapper[4869]: I0929 14:43:44.424807 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerDied","Data":"cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1"} Sep 29 14:43:44 crc kubenswrapper[4869]: I0929 14:43:44.424823 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerStarted","Data":"1feee69b4ed7876f2a79a00f47e444b6447592cc7fdc4b93de6185db341be13c"} Sep 29 14:43:45 crc kubenswrapper[4869]: I0929 14:43:45.438975 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64xnh" event={"ID":"bdf41451-b799-4eea-a0fb-2804371471b7","Type":"ContainerStarted","Data":"489d662243a4e2a1601fc92691c0f437210f549eead0bc4b744213071449ab66"} Sep 29 14:43:45 crc kubenswrapper[4869]: I0929 14:43:45.469374 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-64xnh" podStartSLOduration=2.949010056 podStartE2EDuration="10.469319942s" podCreationTimestamp="2025-09-29 14:43:35 +0000 UTC" firstStartedPulling="2025-09-29 14:43:37.340803877 +0000 UTC m=+3743.781448207" lastFinishedPulling="2025-09-29 14:43:44.861113773 +0000 UTC m=+3751.301758093" observedRunningTime="2025-09-29 14:43:45.464347812 +0000 UTC m=+3751.904992132" watchObservedRunningTime="2025-09-29 14:43:45.469319942 +0000 UTC m=+3751.909964262" Sep 29 14:43:46 crc kubenswrapper[4869]: I0929 14:43:46.312008 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:46 crc kubenswrapper[4869]: I0929 14:43:46.312326 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-64xnh" Sep 29 14:43:46 crc kubenswrapper[4869]: I0929 14:43:46.456565 
4869 generic.go:334] "Generic (PLEG): container finished" podID="396998c2-841b-4d0e-a291-a61d0994234c" containerID="7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d" exitCode=0
Sep 29 14:43:46 crc kubenswrapper[4869]: I0929 14:43:46.456817 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerDied","Data":"7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d"}
Sep 29 14:43:47 crc kubenswrapper[4869]: I0929 14:43:47.375307 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-64xnh" podUID="bdf41451-b799-4eea-a0fb-2804371471b7" containerName="registry-server" probeResult="failure" output=<
Sep 29 14:43:47 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s
Sep 29 14:43:47 crc kubenswrapper[4869]: >
Sep 29 14:43:47 crc kubenswrapper[4869]: I0929 14:43:47.475920 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerStarted","Data":"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"}
Sep 29 14:43:47 crc kubenswrapper[4869]: I0929 14:43:47.497703 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-slspb" podStartSLOduration=4.053722764 podStartE2EDuration="6.497679097s" podCreationTimestamp="2025-09-29 14:43:41 +0000 UTC" firstStartedPulling="2025-09-29 14:43:44.42797156 +0000 UTC m=+3750.868615880" lastFinishedPulling="2025-09-29 14:43:46.871927893 +0000 UTC m=+3753.312572213" observedRunningTime="2025-09-29 14:43:47.496123016 +0000 UTC m=+3753.936767356" watchObservedRunningTime="2025-09-29 14:43:47.497679097 +0000 UTC m=+3753.938323417"
Sep 29 14:43:48 crc kubenswrapper[4869]: I0929 14:43:48.242756 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:43:48 crc kubenswrapper[4869]: E0929 14:43:48.243683 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:43:52 crc kubenswrapper[4869]: I0929 14:43:52.178310 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:43:52 crc kubenswrapper[4869]: I0929 14:43:52.179847 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:43:53 crc kubenswrapper[4869]: I0929 14:43:53.275344 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-slspb" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="registry-server" probeResult="failure" output=<
Sep 29 14:43:53 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s
Sep 29 14:43:53 crc kubenswrapper[4869]: >
Sep 29 14:43:57 crc kubenswrapper[4869]: I0929 14:43:57.369076 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-64xnh" podUID="bdf41451-b799-4eea-a0fb-2804371471b7" containerName="registry-server" probeResult="failure" output=<
Sep 29 14:43:57 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s
Sep 29 14:43:57 crc kubenswrapper[4869]: >
Sep 29 14:44:02 crc kubenswrapper[4869]: I0929 14:44:02.239137 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:44:02 crc kubenswrapper[4869]: I0929 14:44:02.320581 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:44:02 crc kubenswrapper[4869]: I0929 14:44:02.514175 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-slspb"]
Sep 29 14:44:03 crc kubenswrapper[4869]: I0929 14:44:03.243344 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:44:03 crc kubenswrapper[4869]: E0929 14:44:03.243836 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:44:03 crc kubenswrapper[4869]: I0929 14:44:03.658181 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-slspb" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="registry-server" containerID="cri-o://539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a" gracePeriod=2
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.392285 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.526445 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gwql\" (UniqueName: \"kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql\") pod \"396998c2-841b-4d0e-a291-a61d0994234c\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") "
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.526533 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities\") pod \"396998c2-841b-4d0e-a291-a61d0994234c\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") "
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.526680 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content\") pod \"396998c2-841b-4d0e-a291-a61d0994234c\" (UID: \"396998c2-841b-4d0e-a291-a61d0994234c\") "
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.527337 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities" (OuterVolumeSpecName: "utilities") pod "396998c2-841b-4d0e-a291-a61d0994234c" (UID: "396998c2-841b-4d0e-a291-a61d0994234c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.528678 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.535925 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql" (OuterVolumeSpecName: "kube-api-access-5gwql") pod "396998c2-841b-4d0e-a291-a61d0994234c" (UID: "396998c2-841b-4d0e-a291-a61d0994234c"). InnerVolumeSpecName "kube-api-access-5gwql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.582457 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "396998c2-841b-4d0e-a291-a61d0994234c" (UID: "396998c2-841b-4d0e-a291-a61d0994234c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.630911 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/396998c2-841b-4d0e-a291-a61d0994234c-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.631198 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gwql\" (UniqueName: \"kubernetes.io/projected/396998c2-841b-4d0e-a291-a61d0994234c-kube-api-access-5gwql\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.671056 4869 generic.go:334] "Generic (PLEG): container finished" podID="396998c2-841b-4d0e-a291-a61d0994234c" containerID="539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a" exitCode=0
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.671111 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerDied","Data":"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"}
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.671128 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-slspb"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.671147 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slspb" event={"ID":"396998c2-841b-4d0e-a291-a61d0994234c","Type":"ContainerDied","Data":"1feee69b4ed7876f2a79a00f47e444b6447592cc7fdc4b93de6185db341be13c"}
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.671167 4869 scope.go:117] "RemoveContainer" containerID="539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.694319 4869 scope.go:117] "RemoveContainer" containerID="7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.730297 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-slspb"]
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.734479 4869 scope.go:117] "RemoveContainer" containerID="cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.740396 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-slspb"]
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.780354 4869 scope.go:117] "RemoveContainer" containerID="539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"
Sep 29 14:44:04 crc kubenswrapper[4869]: E0929 14:44:04.781005 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a\": container with ID starting with 539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a not found: ID does not exist" containerID="539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.781040 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a"} err="failed to get container status \"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a\": rpc error: code = NotFound desc = could not find container \"539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a\": container with ID starting with 539c904465534f56e684fa4b0a9bfb685960162552d5fe38764dfe56f5a1262a not found: ID does not exist"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.781065 4869 scope.go:117] "RemoveContainer" containerID="7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d"
Sep 29 14:44:04 crc kubenswrapper[4869]: E0929 14:44:04.781371 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d\": container with ID starting with 7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d not found: ID does not exist" containerID="7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.781455 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d"} err="failed to get container status \"7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d\": rpc error: code = NotFound desc = could not find container \"7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d\": container with ID starting with 7d923591b1e3d226001c276d6f93a3b3f74a5e3907832b80679c14846e9c9d2d not found: ID does not exist"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.781518 4869 scope.go:117] "RemoveContainer" containerID="cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1"
Sep 29 14:44:04 crc kubenswrapper[4869]: E0929 14:44:04.781916 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1\": container with ID starting with cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1 not found: ID does not exist" containerID="cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1"
Sep 29 14:44:04 crc kubenswrapper[4869]: I0929 14:44:04.781937 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1"} err="failed to get container status \"cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1\": rpc error: code = NotFound desc = could not find container \"cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1\": container with ID starting with cc58ab8912fde0063bdf5df9fa6b050863f69ef072c25aa61566ed6ec562a9a1 not found: ID does not exist"
Sep 29 14:44:06 crc kubenswrapper[4869]: I0929 14:44:06.254670 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="396998c2-841b-4d0e-a291-a61d0994234c" path="/var/lib/kubelet/pods/396998c2-841b-4d0e-a291-a61d0994234c/volumes"
Sep 29 14:44:06 crc kubenswrapper[4869]: I0929 14:44:06.380419 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-64xnh"
Sep 29 14:44:06 crc kubenswrapper[4869]: I0929 14:44:06.442245 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-64xnh"
Sep 29 14:44:07 crc kubenswrapper[4869]: I0929 14:44:07.534292 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64xnh"]
Sep 29 14:44:07 crc kubenswrapper[4869]: I0929 14:44:07.908126 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"]
Sep 29 14:44:07 crc kubenswrapper[4869]: I0929 14:44:07.908466 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qpffg" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server" containerID="cri-o://f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32" gracePeriod=2
Sep 29 14:44:07 crc kubenswrapper[4869]: E0929 14:44:07.942299 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 is running failed: container process not found" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:44:07 crc kubenswrapper[4869]: E0929 14:44:07.943169 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 is running failed: container process not found" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:44:07 crc kubenswrapper[4869]: E0929 14:44:07.945284 4869 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 is running failed: container process not found" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32" cmd=["grpc_health_probe","-addr=:50051"]
Sep 29 14:44:07 crc kubenswrapper[4869]: E0929 14:44:07.945357 4869 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-qpffg" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.445450 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qpffg"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.634471 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities\") pod \"c732020f-26a7-4848-97ab-2b91cb919c1c\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") "
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.634775 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content\") pod \"c732020f-26a7-4848-97ab-2b91cb919c1c\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") "
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.634906 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwksw\" (UniqueName: \"kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw\") pod \"c732020f-26a7-4848-97ab-2b91cb919c1c\" (UID: \"c732020f-26a7-4848-97ab-2b91cb919c1c\") "
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.635759 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities" (OuterVolumeSpecName: "utilities") pod "c732020f-26a7-4848-97ab-2b91cb919c1c" (UID: "c732020f-26a7-4848-97ab-2b91cb919c1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.636003 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.642343 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw" (OuterVolumeSpecName: "kube-api-access-pwksw") pod "c732020f-26a7-4848-97ab-2b91cb919c1c" (UID: "c732020f-26a7-4848-97ab-2b91cb919c1c"). InnerVolumeSpecName "kube-api-access-pwksw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.674722 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c732020f-26a7-4848-97ab-2b91cb919c1c" (UID: "c732020f-26a7-4848-97ab-2b91cb919c1c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.718341 4869 generic.go:334] "Generic (PLEG): container finished" podID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32" exitCode=0
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.718397 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerDied","Data":"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"}
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.718432 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qpffg" event={"ID":"c732020f-26a7-4848-97ab-2b91cb919c1c","Type":"ContainerDied","Data":"2fc86c4ce9c296cc640948a19c070adc7617ac012a83ecb19214125b84608361"}
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.718451 4869 scope.go:117] "RemoveContainer" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.718498 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qpffg"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.739696 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c732020f-26a7-4848-97ab-2b91cb919c1c-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.739737 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwksw\" (UniqueName: \"kubernetes.io/projected/c732020f-26a7-4848-97ab-2b91cb919c1c-kube-api-access-pwksw\") on node \"crc\" DevicePath \"\""
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.766591 4869 scope.go:117] "RemoveContainer" containerID="0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.773818 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"]
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.785894 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qpffg"]
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.791215 4869 scope.go:117] "RemoveContainer" containerID="429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.838571 4869 scope.go:117] "RemoveContainer" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"
Sep 29 14:44:08 crc kubenswrapper[4869]: E0929 14:44:08.839304 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32\": container with ID starting with f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 not found: ID does not exist" containerID="f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.839348 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32"} err="failed to get container status \"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32\": rpc error: code = NotFound desc = could not find container \"f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32\": container with ID starting with f54825132808604749b95dd76e6694ec38cad376f61021d92a28de406d136e32 not found: ID does not exist"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.839386 4869 scope.go:117] "RemoveContainer" containerID="0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a"
Sep 29 14:44:08 crc kubenswrapper[4869]: E0929 14:44:08.839979 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a\": container with ID starting with 0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a not found: ID does not exist" containerID="0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.840017 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a"} err="failed to get container status \"0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a\": rpc error: code = NotFound desc = could not find container \"0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a\": container with ID starting with 0dec81705907b2e92c1b49faf016333e7a246ac3d04dd065677b33c7a2e89d0a not found: ID does not exist"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.840038 4869 scope.go:117] "RemoveContainer" containerID="429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db"
Sep 29 14:44:08 crc kubenswrapper[4869]: E0929 14:44:08.840309 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db\": container with ID starting with 429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db not found: ID does not exist" containerID="429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db"
Sep 29 14:44:08 crc kubenswrapper[4869]: I0929 14:44:08.840342 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db"} err="failed to get container status \"429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db\": rpc error: code = NotFound desc = could not find container \"429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db\": container with ID starting with 429b6f09a4931e9c5dbce48189aea5375ee90f6c7ec51285d9a6500b95b3b7db not found: ID does not exist"
Sep 29 14:44:10 crc kubenswrapper[4869]: I0929 14:44:10.262080 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" path="/var/lib/kubelet/pods/c732020f-26a7-4848-97ab-2b91cb919c1c/volumes"
Sep 29 14:44:14 crc kubenswrapper[4869]: I0929 14:44:14.261106 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:44:14 crc kubenswrapper[4869]: E0929 14:44:14.262170 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:44:24 crc kubenswrapper[4869]: I0929 14:44:24.240589 4869 scope.go:117] "RemoveContainer" containerID="5f405495892b82c03162b657e9c32db8f7995437f67a1fc5d95d957a98c45b98"
Sep 29 14:44:25 crc kubenswrapper[4869]: I0929 14:44:25.242788 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:44:25 crc kubenswrapper[4869]: E0929 14:44:25.243470 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:44:40 crc kubenswrapper[4869]: I0929 14:44:40.242571 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:44:40 crc kubenswrapper[4869]: E0929 14:44:40.243668 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:44:52 crc kubenswrapper[4869]: I0929 14:44:52.241904 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:44:52 crc kubenswrapper[4869]: E0929 14:44:52.243829 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.154699 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"]
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.155958 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.155976 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.156000 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="extract-utilities"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156010 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="extract-utilities"
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.156032 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="extract-utilities"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156041 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="extract-utilities"
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.156062 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="extract-content"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156069 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="extract-content"
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.156079 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="extract-content"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156088 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="extract-content"
Sep 29 14:45:00 crc kubenswrapper[4869]: E0929 14:45:00.156109 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156118 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156370 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="c732020f-26a7-4848-97ab-2b91cb919c1c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.156390 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="396998c2-841b-4d0e-a291-a61d0994234c" containerName="registry-server"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.157473 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.160407 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.160427 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.177764 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.177809 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.177894 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tws87\" (UniqueName: \"kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.177967 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"]
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.278596 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.278947 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.279008 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tws87\" (UniqueName: \"kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.279936 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.292365 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.300442 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tws87\" (UniqueName: \"kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87\") pod \"collect-profiles-29319285-bnfdw\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:00 crc kubenswrapper[4869]: I0929 14:45:00.482882 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:01 crc kubenswrapper[4869]: I0929 14:45:01.083371 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"]
Sep 29 14:45:01 crc kubenswrapper[4869]: I0929 14:45:01.311206 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw" event={"ID":"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6","Type":"ContainerStarted","Data":"41d1f569a598631e1dc4147f542e0e95c883855e2d37a739f85663c429f8a0be"}
Sep 29 14:45:01 crc kubenswrapper[4869]: I0929 14:45:01.311285 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw" event={"ID":"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6","Type":"ContainerStarted","Data":"ebf52e1d6530300c83bbe3b7da8682182988993abccbf678c821a37fd6092828"}
Sep 29 14:45:01 crc kubenswrapper[4869]: I0929 14:45:01.339169 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw" podStartSLOduration=1.338683842 podStartE2EDuration="1.338683842s" podCreationTimestamp="2025-09-29 14:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 14:45:01.328754783 +0000 UTC m=+3827.769399113" watchObservedRunningTime="2025-09-29 14:45:01.338683842 +0000 UTC m=+3827.779328172"
Sep 29 14:45:02 crc kubenswrapper[4869]: I0929 14:45:02.323082 4869 generic.go:334] "Generic (PLEG): container finished" podID="ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" containerID="41d1f569a598631e1dc4147f542e0e95c883855e2d37a739f85663c429f8a0be" exitCode=0
Sep 29 14:45:02 crc kubenswrapper[4869]: I0929 14:45:02.323194 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw" event={"ID":"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6","Type":"ContainerDied","Data":"41d1f569a598631e1dc4147f542e0e95c883855e2d37a739f85663c429f8a0be"}
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.339059 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.344328 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw" event={"ID":"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6","Type":"ContainerDied","Data":"ebf52e1d6530300c83bbe3b7da8682182988993abccbf678c821a37fd6092828"}
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.344363 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.344372 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebf52e1d6530300c83bbe3b7da8682182988993abccbf678c821a37fd6092828"
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.379757 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume\") pod \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") "
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.379888 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume\") pod \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") "
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.379991 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tws87\" (UniqueName: \"kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87\") pod \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\" (UID: \"ce60ac7b-6cf4-49ec-b2b3-a006648e73a6\") "
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.381360 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume" (OuterVolumeSpecName: "config-volume") pod "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" (UID: "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.386066 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" (UID: "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.391331 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87" (OuterVolumeSpecName: "kube-api-access-tws87") pod "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" (UID: "ce60ac7b-6cf4-49ec-b2b3-a006648e73a6"). InnerVolumeSpecName "kube-api-access-tws87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.482906 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-config-volume\") on node \"crc\" DevicePath \"\""
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.482951 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tws87\" (UniqueName: \"kubernetes.io/projected/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-kube-api-access-tws87\") on node \"crc\" DevicePath \"\""
Sep 29 14:45:04 crc kubenswrapper[4869]: I0929 14:45:04.482964 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6-secret-volume\") on node \"crc\" DevicePath \"\""
Sep 29 14:45:05 crc kubenswrapper[4869]: I0929 14:45:05.425214 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c"]
Sep 29 14:45:05 crc kubenswrapper[4869]: I0929 14:45:05.434540 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319240-nq45c"]
Sep 29 14:45:06 crc kubenswrapper[4869]: I0929 14:45:06.242780 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:45:06 crc kubenswrapper[4869]: E0929 14:45:06.243484 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:45:06 crc kubenswrapper[4869]: I0929 14:45:06.258949 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4467c3d-879c-4833-bfbd-7b6308962682" path="/var/lib/kubelet/pods/d4467c3d-879c-4833-bfbd-7b6308962682/volumes"
Sep 29 14:45:19 crc kubenswrapper[4869]: I0929 14:45:19.242639 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:45:19 crc kubenswrapper[4869]: E0929 14:45:19.243935 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:45:24 crc kubenswrapper[4869]: I0929 14:45:24.325726 4869 scope.go:117] "RemoveContainer" containerID="a1a02ccd33d7d914cfd0dcd8969ab58ccd4fe3056a98cc556b4c27fbf6ced241"
Sep 29 14:45:24 crc kubenswrapper[4869]: I0929 14:45:24.508760 4869 scope.go:117] "RemoveContainer" containerID="380534b77da008affdafba5e0bfe7f51993ac6c02afdce9a2e13d8faa7fd4902"
Sep 29 14:45:32 crc kubenswrapper[4869]: I0929 14:45:32.242564 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:45:32 crc kubenswrapper[4869]: E0929 14:45:32.243342 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:45:46 crc kubenswrapper[4869]: I0929 14:45:46.242331 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:45:46 crc kubenswrapper[4869]: E0929 14:45:46.243287 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:45:59 crc kubenswrapper[4869]: I0929 14:45:59.242765 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:45:59 crc kubenswrapper[4869]: E0929 14:45:59.243887 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:46:14 crc kubenswrapper[4869]: I0929 14:46:14.258630 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:46:14 crc kubenswrapper[4869]: E0929 14:46:14.259543 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:46:27 crc kubenswrapper[4869]: I0929 14:46:27.242442 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:46:27 crc kubenswrapper[4869]: E0929 14:46:27.243113 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:46:40 crc kubenswrapper[4869]: I0929 14:46:40.242576 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:46:40 crc kubenswrapper[4869]: E0929 14:46:40.243988 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:46:53 crc kubenswrapper[4869]: I0929 14:46:53.242839 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:46:53 crc kubenswrapper[4869]: E0929 14:46:53.243679 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:47:07 crc kubenswrapper[4869]: I0929 14:47:07.242520 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:47:07 crc kubenswrapper[4869]: E0929 14:47:07.243400 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:47:20 crc kubenswrapper[4869]: I0929 14:47:20.245102 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:47:20 crc kubenswrapper[4869]: E0929 14:47:20.246179 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.427299 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:26 crc kubenswrapper[4869]: E0929 14:47:26.428980 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" containerName="collect-profiles"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.429001 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" containerName="collect-profiles"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.429984 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" containerName="collect-profiles"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.431569 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.442140 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.442224 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.442292 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnkdk\" (UniqueName: \"kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.450995 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.544066 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnkdk\" (UniqueName: \"kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.544769 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.544866 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.545436 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.545558 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.568934 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnkdk\" (UniqueName: \"kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk\") pod \"redhat-marketplace-69ndq\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") " pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:26 crc kubenswrapper[4869]: I0929 14:47:26.802706 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:27 crc kubenswrapper[4869]: I0929 14:47:27.280172 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:28 crc kubenswrapper[4869]: I0929 14:47:28.010253 4869 generic.go:334] "Generic (PLEG): container finished" podID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerID="1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092" exitCode=0
Sep 29 14:47:28 crc kubenswrapper[4869]: I0929 14:47:28.010325 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerDied","Data":"1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092"}
Sep 29 14:47:28 crc kubenswrapper[4869]: I0929 14:47:28.010574 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerStarted","Data":"a377e16a8325b5b1570d6194e7df0ba9275cf8d2c5b7809dcff2eeff3a79681b"}
Sep 29 14:47:30 crc kubenswrapper[4869]: I0929 14:47:30.032645 4869 generic.go:334] "Generic (PLEG): container finished" podID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerID="98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b" exitCode=0
Sep 29 14:47:30 crc kubenswrapper[4869]: I0929 14:47:30.032767 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerDied","Data":"98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b"}
Sep 29 14:47:31 crc kubenswrapper[4869]: I0929 14:47:31.044496 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerStarted","Data":"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"}
Sep 29 14:47:31 crc kubenswrapper[4869]: I0929 14:47:31.069101 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-69ndq" podStartSLOduration=2.669846407 podStartE2EDuration="5.069077643s" podCreationTimestamp="2025-09-29 14:47:26 +0000 UTC" firstStartedPulling="2025-09-29 14:47:28.012727324 +0000 UTC m=+3974.453371644" lastFinishedPulling="2025-09-29 14:47:30.41195855 +0000 UTC m=+3976.852602880" observedRunningTime="2025-09-29 14:47:31.062780109 +0000 UTC m=+3977.503424429" watchObservedRunningTime="2025-09-29 14:47:31.069077643 +0000 UTC m=+3977.509721953"
Sep 29 14:47:32 crc kubenswrapper[4869]: I0929 14:47:32.242371 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:47:32 crc kubenswrapper[4869]: E0929 14:47:32.243095 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:47:36 crc kubenswrapper[4869]: I0929 14:47:36.803840 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:36 crc kubenswrapper[4869]: I0929 14:47:36.804411 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:36 crc kubenswrapper[4869]: I0929 14:47:36.852560 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:37 crc kubenswrapper[4869]: I0929 14:47:37.172062 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:37 crc kubenswrapper[4869]: I0929 14:47:37.215777 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.143173 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-69ndq" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="registry-server" containerID="cri-o://308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2" gracePeriod=2
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.576052 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.677470 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnkdk\" (UniqueName: \"kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk\") pod \"4978003b-3b4f-4c8e-a81a-57f0957948f3\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") "
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.677672 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities\") pod \"4978003b-3b4f-4c8e-a81a-57f0957948f3\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") "
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.677742 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content\") pod \"4978003b-3b4f-4c8e-a81a-57f0957948f3\" (UID: \"4978003b-3b4f-4c8e-a81a-57f0957948f3\") "
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.678729 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities" (OuterVolumeSpecName: "utilities") pod "4978003b-3b4f-4c8e-a81a-57f0957948f3" (UID: "4978003b-3b4f-4c8e-a81a-57f0957948f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.686007 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk" (OuterVolumeSpecName: "kube-api-access-vnkdk") pod "4978003b-3b4f-4c8e-a81a-57f0957948f3" (UID: "4978003b-3b4f-4c8e-a81a-57f0957948f3"). InnerVolumeSpecName "kube-api-access-vnkdk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.696723 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4978003b-3b4f-4c8e-a81a-57f0957948f3" (UID: "4978003b-3b4f-4c8e-a81a-57f0957948f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.780490 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.780525 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4978003b-3b4f-4c8e-a81a-57f0957948f3-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 14:47:39 crc kubenswrapper[4869]: I0929 14:47:39.780539 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnkdk\" (UniqueName: \"kubernetes.io/projected/4978003b-3b4f-4c8e-a81a-57f0957948f3-kube-api-access-vnkdk\") on node \"crc\" DevicePath \"\""
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.156629 4869 generic.go:334] "Generic (PLEG): container finished" podID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerID="308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2" exitCode=0
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.156698 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerDied","Data":"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"}
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.157051 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-69ndq" event={"ID":"4978003b-3b4f-4c8e-a81a-57f0957948f3","Type":"ContainerDied","Data":"a377e16a8325b5b1570d6194e7df0ba9275cf8d2c5b7809dcff2eeff3a79681b"}
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.156716 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-69ndq"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.157085 4869 scope.go:117] "RemoveContainer" containerID="308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.191680 4869 scope.go:117] "RemoveContainer" containerID="98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.203356 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.214217 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-69ndq"]
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.224696 4869 scope.go:117] "RemoveContainer" containerID="1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.266993 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" path="/var/lib/kubelet/pods/4978003b-3b4f-4c8e-a81a-57f0957948f3/volumes"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.277993 4869 scope.go:117] "RemoveContainer" containerID="308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"
Sep 29 14:47:40 crc kubenswrapper[4869]: E0929 14:47:40.278662 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2\": container with ID starting with 308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2 not found: ID does not exist" containerID="308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.278720 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2"} err="failed to get container status \"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2\": rpc error: code = NotFound desc = could not find container \"308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2\": container with ID starting with 308a049d64507e66bc549ec4a32ff4a977bfeab94d38eb4336b7c055bdebb9e2 not found: ID does not exist"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.278758 4869 scope.go:117] "RemoveContainer" containerID="98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b"
Sep 29 14:47:40 crc kubenswrapper[4869]: E0929 14:47:40.279761 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b\": container with ID starting with 98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b not found: ID does not exist" containerID="98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.279791 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b"} err="failed to get container status \"98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b\": rpc error: code = NotFound desc = could not find container \"98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b\": container with ID starting with 98ed2e2359995d9b99991cc7951347dd22775feaa62b4c21c1a90e34ba7e914b not found: ID does not exist"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.279809 4869 scope.go:117] "RemoveContainer" containerID="1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092"
Sep 29 14:47:40 crc kubenswrapper[4869]: E0929 14:47:40.280328 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092\": container with ID starting with 1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092 not found: ID does not exist" containerID="1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092"
Sep 29 14:47:40 crc kubenswrapper[4869]: I0929 14:47:40.280391 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092"} err="failed to get container status \"1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092\": rpc error: code = NotFound desc = could not find container \"1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092\": container with ID starting with 1b266f96526da33c37f745d8f3ac66466f057aa0e594bed41a79b1076567d092 not found: ID does not exist"
Sep 29 14:47:43 crc kubenswrapper[4869]: I0929 14:47:43.242519 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:47:43 crc kubenswrapper[4869]: E0929 14:47:43.243828 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:47:58 crc kubenswrapper[4869]: I0929 14:47:58.242572 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:47:58 crc kubenswrapper[4869]: E0929 14:47:58.243579 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:48:11 crc kubenswrapper[4869]: I0929 14:48:11.242967 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40"
Sep 29 14:48:11 crc kubenswrapper[4869]: E0929 14:48:11.243913 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:48:23 crc kubenswrapper[4869]: I0929 14:48:23.242799 4869 scope.go:117] "RemoveContainer"
containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" Sep 29 14:48:23 crc kubenswrapper[4869]: I0929 14:48:23.644468 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f"} Sep 29 14:50:50 crc kubenswrapper[4869]: I0929 14:50:50.657561 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:50:50 crc kubenswrapper[4869]: I0929 14:50:50.658207 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:51:20 crc kubenswrapper[4869]: I0929 14:51:20.662558 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:51:20 crc kubenswrapper[4869]: I0929 14:51:20.663083 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.657975 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.658556 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.658621 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.659503 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.659555 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" 
containerName="machine-config-daemon" containerID="cri-o://55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f" gracePeriod=600 Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.912723 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f" exitCode=0 Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.913069 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f"} Sep 29 14:51:50 crc kubenswrapper[4869]: I0929 14:51:50.913164 4869 scope.go:117] "RemoveContainer" containerID="88d660971419e80773cb22bac5e6de369044379ef514e4fd05a620d485a90f40" Sep 29 14:51:51 crc kubenswrapper[4869]: I0929 14:51:51.939789 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"} Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.047823 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:51:59 crc kubenswrapper[4869]: E0929 14:51:59.054932 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="extract-utilities" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.054979 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="extract-utilities" Sep 29 14:51:59 crc kubenswrapper[4869]: E0929 14:51:59.055023 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="extract-content" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.055036 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="extract-content" Sep 29 14:51:59 crc kubenswrapper[4869]: E0929 14:51:59.055086 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="registry-server" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.055096 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="registry-server" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.055594 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4978003b-3b4f-4c8e-a81a-57f0957948f3" containerName="registry-server" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.061639 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.067132 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.155461 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.155602 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.155712 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jthx\" (UniqueName: \"kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.258093 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.258234 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.258302 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jthx\" (UniqueName: \"kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.258644 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.258709 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.282052 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4jthx\" (UniqueName: \"kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx\") pod \"redhat-operators-b5gml\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.398645 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:51:59 crc kubenswrapper[4869]: I0929 14:51:59.886512 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:52:00 crc kubenswrapper[4869]: I0929 14:52:00.028947 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerStarted","Data":"a5cce22aa0094912b28212ae39ebf04a6bcbeb13e298689530ac497dd1f68bb0"} Sep 29 14:52:01 crc kubenswrapper[4869]: I0929 14:52:01.040637 4869 generic.go:334] "Generic (PLEG): container finished" podID="54d31292-b880-4b30-86a9-a50267cc108d" containerID="d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba" exitCode=0 Sep 29 14:52:01 crc kubenswrapper[4869]: I0929 14:52:01.040837 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerDied","Data":"d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba"} Sep 29 14:52:01 crc kubenswrapper[4869]: I0929 14:52:01.043731 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:52:02 crc kubenswrapper[4869]: I0929 14:52:02.055391 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerStarted","Data":"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b"} Sep 29 14:52:04 crc kubenswrapper[4869]: I0929 14:52:04.082372 4869 generic.go:334] "Generic (PLEG): container finished" podID="54d31292-b880-4b30-86a9-a50267cc108d" containerID="f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b" exitCode=0 Sep 29 14:52:04 crc kubenswrapper[4869]: I0929 14:52:04.082494 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerDied","Data":"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b"} Sep 29 14:52:05 crc kubenswrapper[4869]: I0929 14:52:05.097141 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerStarted","Data":"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a"} Sep 29 14:52:05 crc kubenswrapper[4869]: I0929 14:52:05.125249 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5gml" podStartSLOduration=3.577488404 podStartE2EDuration="7.125226965s" podCreationTimestamp="2025-09-29 14:51:58 +0000 UTC" firstStartedPulling="2025-09-29 14:52:01.043459498 +0000 UTC m=+4247.484103818" lastFinishedPulling="2025-09-29 14:52:04.591198059 +0000 UTC m=+4251.031842379" observedRunningTime="2025-09-29 14:52:05.1208343 +0000 UTC m=+4251.561478620" watchObservedRunningTime="2025-09-29 14:52:05.125226965 +0000 UTC m=+4251.565871285" Sep 29 14:52:09 crc 
kubenswrapper[4869]: I0929 14:52:09.399850 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:09 crc kubenswrapper[4869]: I0929 14:52:09.400421 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:10 crc kubenswrapper[4869]: I0929 14:52:10.442329 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5gml" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="registry-server" probeResult="failure" output=< Sep 29 14:52:10 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 14:52:10 crc kubenswrapper[4869]: > Sep 29 14:52:19 crc kubenswrapper[4869]: I0929 14:52:19.463318 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:19 crc kubenswrapper[4869]: I0929 14:52:19.523758 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:19 crc kubenswrapper[4869]: I0929 14:52:19.702290 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.262388 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b5gml" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="registry-server" containerID="cri-o://8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a" gracePeriod=2 Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.746210 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.813522 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content\") pod \"54d31292-b880-4b30-86a9-a50267cc108d\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.815855 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities\") pod \"54d31292-b880-4b30-86a9-a50267cc108d\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.816421 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jthx\" (UniqueName: \"kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx\") pod \"54d31292-b880-4b30-86a9-a50267cc108d\" (UID: \"54d31292-b880-4b30-86a9-a50267cc108d\") " Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.817252 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities" (OuterVolumeSpecName: "utilities") pod "54d31292-b880-4b30-86a9-a50267cc108d" (UID: "54d31292-b880-4b30-86a9-a50267cc108d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.817385 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.823879 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx" (OuterVolumeSpecName: "kube-api-access-4jthx") pod "54d31292-b880-4b30-86a9-a50267cc108d" (UID: "54d31292-b880-4b30-86a9-a50267cc108d"). InnerVolumeSpecName "kube-api-access-4jthx". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.899894 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54d31292-b880-4b30-86a9-a50267cc108d" (UID: "54d31292-b880-4b30-86a9-a50267cc108d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.919886 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jthx\" (UniqueName: \"kubernetes.io/projected/54d31292-b880-4b30-86a9-a50267cc108d-kube-api-access-4jthx\") on node \"crc\" DevicePath \"\"" Sep 29 14:52:21 crc kubenswrapper[4869]: I0929 14:52:21.919953 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54d31292-b880-4b30-86a9-a50267cc108d-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.273480 4869 generic.go:334] "Generic (PLEG): container finished" podID="54d31292-b880-4b30-86a9-a50267cc108d" containerID="8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a" exitCode=0 Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.273525 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerDied","Data":"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a"} Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.273554 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5gml" event={"ID":"54d31292-b880-4b30-86a9-a50267cc108d","Type":"ContainerDied","Data":"a5cce22aa0094912b28212ae39ebf04a6bcbeb13e298689530ac497dd1f68bb0"} Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.273572 4869 scope.go:117] "RemoveContainer" containerID="8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.273727 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5gml" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.300922 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.314086 4869 scope.go:117] "RemoveContainer" containerID="f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.323285 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5gml"] Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.361399 4869 scope.go:117] "RemoveContainer" containerID="d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.386143 4869 scope.go:117] "RemoveContainer" containerID="8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a" Sep 29 14:52:22 crc kubenswrapper[4869]: E0929 14:52:22.386703 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a\": container with ID starting with 8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a not found: ID does not exist" containerID="8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.386733 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a"} err="failed to get container status \"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a\": rpc error: code = NotFound desc = could not find container \"8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a\": container with ID starting with 8de2d061709be82015c49371927300dd4ac8d05e165d6450bcca5cc6c19cb26a not found: ID does not exist" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.386758 4869 scope.go:117] "RemoveContainer" containerID="f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b" Sep 29 14:52:22 crc kubenswrapper[4869]: E0929 14:52:22.387268 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b\": container with ID starting with f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b not found: ID does not exist" containerID="f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.387292 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b"} err="failed to get container status \"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b\": rpc error: code = NotFound desc = could not find container \"f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b\": container with ID starting with f90d1832d1f4150e95ef5fd7e33fa9d6176957d7e9747a2182124c84402f714b not found: ID does not exist" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.387305 4869 scope.go:117] "RemoveContainer" containerID="d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba" Sep 29 14:52:22 crc kubenswrapper[4869]: E0929 14:52:22.388871 4869 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba\": container with ID starting with d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba not found: ID does not exist" containerID="d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba" Sep 29 14:52:22 crc kubenswrapper[4869]: I0929 14:52:22.388920 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba"} err="failed to get container status \"d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba\": rpc error: code = NotFound desc = could not find container \"d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba\": container with ID starting with d6e1546c35a8ffad7b9ce185137cc5292fbfc86385aafa4d742c795c25197cba not found: ID does not exist" Sep 29 14:52:24 crc kubenswrapper[4869]: I0929 14:52:24.252814 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54d31292-b880-4b30-86a9-a50267cc108d" path="/var/lib/kubelet/pods/54d31292-b880-4b30-86a9-a50267cc108d/volumes" Sep 29 14:53:50 crc kubenswrapper[4869]: I0929 14:53:50.656888 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:53:50 crc kubenswrapper[4869]: I0929 14:53:50.657569 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:54:20 crc kubenswrapper[4869]: I0929 14:54:20.657601 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:54:20 crc kubenswrapper[4869]: I0929 14:54:20.658275 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.760772 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bxsh4"] Sep 29 14:54:36 crc kubenswrapper[4869]: E0929 14:54:36.762138 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="registry-server" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.762159 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="registry-server" Sep 29 14:54:36 crc kubenswrapper[4869]: E0929 14:54:36.762185 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="extract-utilities" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.762194 4869 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="extract-utilities" Sep 29 14:54:36 crc kubenswrapper[4869]: E0929 14:54:36.762209 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="extract-content" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.762219 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="extract-content" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.762489 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="54d31292-b880-4b30-86a9-a50267cc108d" containerName="registry-server" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.764179 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.802279 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.802352 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qhj5\" (UniqueName: \"kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.802599 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.804013 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bxsh4"] Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.906838 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.906988 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.907044 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qhj5\" (UniqueName: \"kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 
14:54:36.907853 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.908052 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:36 crc kubenswrapper[4869]: I0929 14:54:36.929658 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qhj5\" (UniqueName: \"kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5\") pod \"community-operators-bxsh4\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:37 crc kubenswrapper[4869]: I0929 14:54:37.093094 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:37 crc kubenswrapper[4869]: I0929 14:54:37.680792 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bxsh4"] Sep 29 14:54:37 crc kubenswrapper[4869]: I0929 14:54:37.716955 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerStarted","Data":"6ce83c83115a9a39ed28b7005fe9ef27e410287cef5045d95f2b20ccf5c91906"} Sep 29 14:54:38 crc kubenswrapper[4869]: I0929 14:54:38.734850 4869 generic.go:334] "Generic (PLEG): container finished" podID="10332744-77c4-4042-991a-799703a55c2f" containerID="f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186" exitCode=0 Sep 29 14:54:38 crc kubenswrapper[4869]: I0929 14:54:38.735029 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerDied","Data":"f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186"} Sep 29 14:54:40 crc kubenswrapper[4869]: I0929 14:54:40.756974 4869 generic.go:334] "Generic (PLEG): container finished" podID="10332744-77c4-4042-991a-799703a55c2f" containerID="48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6" exitCode=0 Sep 29 14:54:40 crc kubenswrapper[4869]: I0929 14:54:40.757078 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerDied","Data":"48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6"} Sep 29 14:54:41 crc kubenswrapper[4869]: I0929 14:54:41.769434 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerStarted","Data":"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"} Sep 29 14:54:41 crc kubenswrapper[4869]: I0929 14:54:41.796260 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bxsh4" podStartSLOduration=3.312366404 podStartE2EDuration="5.796231956s" 
podCreationTimestamp="2025-09-29 14:54:36 +0000 UTC" firstStartedPulling="2025-09-29 14:54:38.736965626 +0000 UTC m=+4405.177609946" lastFinishedPulling="2025-09-29 14:54:41.220831178 +0000 UTC m=+4407.661475498" observedRunningTime="2025-09-29 14:54:41.785715071 +0000 UTC m=+4408.226359401" watchObservedRunningTime="2025-09-29 14:54:41.796231956 +0000 UTC m=+4408.236876276" Sep 29 14:54:47 crc kubenswrapper[4869]: I0929 14:54:47.095041 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:47 crc kubenswrapper[4869]: I0929 14:54:47.095413 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:47 crc kubenswrapper[4869]: I0929 14:54:47.149469 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:47 crc kubenswrapper[4869]: I0929 14:54:47.878586 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:47 crc kubenswrapper[4869]: I0929 14:54:47.931055 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bxsh4"] Sep 29 14:54:49 crc kubenswrapper[4869]: I0929 14:54:49.846002 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bxsh4" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="registry-server" containerID="cri-o://ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1" gracePeriod=2 Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.453353 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bxsh4" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.532440 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content\") pod \"10332744-77c4-4042-991a-799703a55c2f\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.532828 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities\") pod \"10332744-77c4-4042-991a-799703a55c2f\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.532931 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qhj5\" (UniqueName: \"kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5\") pod \"10332744-77c4-4042-991a-799703a55c2f\" (UID: \"10332744-77c4-4042-991a-799703a55c2f\") " Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.534490 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities" (OuterVolumeSpecName: "utilities") pod "10332744-77c4-4042-991a-799703a55c2f" (UID: "10332744-77c4-4042-991a-799703a55c2f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.545793 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5" (OuterVolumeSpecName: "kube-api-access-9qhj5") pod "10332744-77c4-4042-991a-799703a55c2f" (UID: "10332744-77c4-4042-991a-799703a55c2f"). InnerVolumeSpecName "kube-api-access-9qhj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.586337 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10332744-77c4-4042-991a-799703a55c2f" (UID: "10332744-77c4-4042-991a-799703a55c2f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.636091 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.636133 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qhj5\" (UniqueName: \"kubernetes.io/projected/10332744-77c4-4042-991a-799703a55c2f-kube-api-access-9qhj5\") on node \"crc\" DevicePath \"\"" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.636148 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10332744-77c4-4042-991a-799703a55c2f-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.656975 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.657038 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.657097 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.658434 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.658492 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" gracePeriod=600 
Sep 29 14:54:50 crc kubenswrapper[4869]: E0929 14:54:50.779502 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.860431 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" exitCode=0
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.860519 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"}
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.860584 4869 scope.go:117] "RemoveContainer" containerID="55d76d3df21abc6aadef60ae533e13fa72706838e321f9a99e6df76db7c0537f"
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.862049 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:54:50 crc kubenswrapper[4869]: E0929 14:54:50.862658 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.865215 4869 generic.go:334] "Generic (PLEG): container finished" podID="10332744-77c4-4042-991a-799703a55c2f" containerID="ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1" exitCode=0
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.865270 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerDied","Data":"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"}
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.865300 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bxsh4"
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.865324 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bxsh4" event={"ID":"10332744-77c4-4042-991a-799703a55c2f","Type":"ContainerDied","Data":"6ce83c83115a9a39ed28b7005fe9ef27e410287cef5045d95f2b20ccf5c91906"}
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.922692 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bxsh4"]
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.933005 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bxsh4"]
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.938075 4869 scope.go:117] "RemoveContainer" containerID="ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"
Sep 29 14:54:50 crc kubenswrapper[4869]: I0929 14:54:50.962412 4869 scope.go:117] "RemoveContainer" containerID="48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.006351 4869 scope.go:117] "RemoveContainer" containerID="f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.847422 4869 scope.go:117] "RemoveContainer" containerID="ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"
Sep 29 14:54:51 crc kubenswrapper[4869]: E0929 14:54:51.848086 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1\": container with ID starting with ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1 not found: ID does not exist" containerID="ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.848130 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1"} err="failed to get container status \"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1\": rpc error: code = NotFound desc = could not find container \"ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1\": container with ID starting with ab1e08490f3cad7971aae51840ef60db0c583cde3beb41ed07ac2787f098b4a1 not found: ID does not exist"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.848160 4869 scope.go:117] "RemoveContainer" containerID="48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6"
Sep 29 14:54:51 crc kubenswrapper[4869]: E0929 14:54:51.848550 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6\": container with ID starting with 48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6 not found: ID does not exist" containerID="48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.848585 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6"} err="failed to get container status \"48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6\": rpc error: code = NotFound desc = could not find container \"48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6\": container with ID starting with 48aba486c5f50edc09c644595ee5ce1d30c05e57a141f2beed5c244a5b4ea2c6 not found: ID does not exist"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.848642 4869 scope.go:117] "RemoveContainer" containerID="f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186"
Sep 29 14:54:51 crc kubenswrapper[4869]: E0929 14:54:51.848922 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186\": container with ID starting with f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186 not found: ID does not exist" containerID="f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186"
Sep 29 14:54:51 crc kubenswrapper[4869]: I0929 14:54:51.848951 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186"} err="failed to get container status \"f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186\": rpc error: code = NotFound desc = could not find container \"f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186\": container with ID starting with f386749168f89562a8189d51693cc2a29fc03d4252bace97bce628454395f186 not found: ID does not exist"
Sep 29 14:54:52 crc kubenswrapper[4869]: I0929 14:54:52.254392 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10332744-77c4-4042-991a-799703a55c2f" path="/var/lib/kubelet/pods/10332744-77c4-4042-991a-799703a55c2f/volumes"
Sep 29 14:55:04 crc kubenswrapper[4869]: I0929 14:55:04.248482 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:55:04 crc kubenswrapper[4869]: E0929 14:55:04.249146 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:55:17 crc kubenswrapper[4869]: I0929 14:55:17.244602 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:55:17 crc kubenswrapper[4869]: E0929 14:55:17.246057 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:55:28 crc kubenswrapper[4869]: I0929 14:55:28.242734 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:55:28 crc kubenswrapper[4869]: E0929 14:55:28.243805 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:55:39 crc kubenswrapper[4869]: I0929 14:55:39.242830 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:55:39 crc kubenswrapper[4869]: E0929 14:55:39.243670 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:55:53 crc kubenswrapper[4869]: I0929 14:55:53.242561 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:55:53 crc kubenswrapper[4869]: E0929 14:55:53.243791 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:56:04 crc kubenswrapper[4869]: I0929 14:56:04.248895 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:56:04 crc kubenswrapper[4869]: E0929 14:56:04.249604 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:56:19 crc kubenswrapper[4869]: I0929 14:56:19.241887 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:56:19 crc kubenswrapper[4869]: E0929 14:56:19.242634 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:56:33 crc kubenswrapper[4869]: I0929 14:56:33.242812 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:56:33 crc kubenswrapper[4869]: E0929 14:56:33.243782 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:56:48 crc kubenswrapper[4869]: I0929 14:56:48.242302 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:56:48 crc kubenswrapper[4869]: E0929 14:56:48.243285 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:57:00 crc kubenswrapper[4869]: I0929 14:57:00.242649 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:57:00 crc kubenswrapper[4869]: E0929 14:57:00.243394 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:57:15 crc kubenswrapper[4869]: I0929 14:57:15.242990 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:57:15 crc kubenswrapper[4869]: E0929 14:57:15.244014 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:57:29 crc kubenswrapper[4869]: I0929 14:57:29.242706 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:57:29 crc kubenswrapper[4869]: E0929 14:57:29.243736 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:57:43 crc kubenswrapper[4869]: I0929 14:57:43.242861 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39"
Sep 29 14:57:43 crc kubenswrapper[4869]: E0929 14:57:43.244135 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 14:57:57 crc kubenswrapper[4869]: I0929 14:57:57.241999 4869 scope.go:117] "RemoveContainer"
containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:57:57 crc kubenswrapper[4869]: E0929 14:57:57.242888 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:58:09 crc kubenswrapper[4869]: I0929 14:58:09.242685 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:58:09 crc kubenswrapper[4869]: E0929 14:58:09.243326 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.397737 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:18 crc kubenswrapper[4869]: E0929 14:58:18.399202 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="registry-server" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.399227 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="registry-server" Sep 29 14:58:18 crc kubenswrapper[4869]: E0929 14:58:18.399267 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="extract-utilities" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.399276 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="extract-utilities" Sep 29 14:58:18 crc kubenswrapper[4869]: E0929 14:58:18.399293 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="extract-content" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.399302 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="extract-content" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.399566 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="10332744-77c4-4042-991a-799703a55c2f" containerName="registry-server" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.401585 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.412667 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.435379 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfbmm\" (UniqueName: \"kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.435562 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.435921 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.536831 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.536930 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.537010 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfbmm\" (UniqueName: \"kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.538422 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.538880 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.558177 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vfbmm\" (UniqueName: \"kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm\") pod \"redhat-marketplace-ghhfl\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:18 crc kubenswrapper[4869]: I0929 14:58:18.734176 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:19 crc kubenswrapper[4869]: I0929 14:58:19.205226 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:20 crc kubenswrapper[4869]: I0929 14:58:20.001297 4869 generic.go:334] "Generic (PLEG): container finished" podID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerID="8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5" exitCode=0 Sep 29 14:58:20 crc kubenswrapper[4869]: I0929 14:58:20.001369 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerDied","Data":"8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5"} Sep 29 14:58:20 crc kubenswrapper[4869]: I0929 14:58:20.002663 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerStarted","Data":"105738ebebdfc254bb29c41ed9e7840db6218ebf3e5bd729f803aa4a967dd077"} Sep 29 14:58:20 crc kubenswrapper[4869]: I0929 14:58:20.004298 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 14:58:21 crc kubenswrapper[4869]: I0929 14:58:21.022199 4869 generic.go:334] "Generic (PLEG): container finished" podID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerID="6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56" exitCode=0 Sep 29 14:58:21 crc kubenswrapper[4869]: I0929 14:58:21.022315 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerDied","Data":"6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56"} Sep 29 14:58:23 crc kubenswrapper[4869]: I0929 14:58:23.063995 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerStarted","Data":"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187"} Sep 29 14:58:23 crc kubenswrapper[4869]: I0929 14:58:23.095092 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ghhfl" podStartSLOduration=3.342047879 podStartE2EDuration="5.095064264s" podCreationTimestamp="2025-09-29 14:58:18 +0000 UTC" firstStartedPulling="2025-09-29 14:58:20.004063504 +0000 UTC m=+4626.444707824" lastFinishedPulling="2025-09-29 14:58:21.757079889 +0000 UTC m=+4628.197724209" observedRunningTime="2025-09-29 14:58:23.092745093 +0000 UTC m=+4629.533389433" watchObservedRunningTime="2025-09-29 14:58:23.095064264 +0000 UTC m=+4629.535708584" Sep 29 14:58:23 crc kubenswrapper[4869]: I0929 14:58:23.242509 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:58:23 crc kubenswrapper[4869]: E0929 14:58:23.243086 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:58:28 crc kubenswrapper[4869]: I0929 14:58:28.735368 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:28 crc kubenswrapper[4869]: I0929 14:58:28.736080 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:28 crc kubenswrapper[4869]: I0929 14:58:28.794057 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:29 crc kubenswrapper[4869]: I0929 14:58:29.204182 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:29 crc kubenswrapper[4869]: I0929 14:58:29.252323 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:31 crc kubenswrapper[4869]: I0929 14:58:31.186697 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ghhfl" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="registry-server" containerID="cri-o://8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187" gracePeriod=2 Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.130467 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.199005 4869 generic.go:334] "Generic (PLEG): container finished" podID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerID="8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187" exitCode=0 Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.199065 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerDied","Data":"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187"} Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.199108 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghhfl" event={"ID":"0cd761fb-1832-4045-8a43-66a91e49ce3c","Type":"ContainerDied","Data":"105738ebebdfc254bb29c41ed9e7840db6218ebf3e5bd729f803aa4a967dd077"} Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.199157 4869 scope.go:117] "RemoveContainer" containerID="8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.199354 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghhfl" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.232184 4869 scope.go:117] "RemoveContainer" containerID="6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.257644 4869 scope.go:117] "RemoveContainer" containerID="8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.259903 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities\") pod \"0cd761fb-1832-4045-8a43-66a91e49ce3c\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.260086 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfbmm\" (UniqueName: \"kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm\") pod \"0cd761fb-1832-4045-8a43-66a91e49ce3c\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.260209 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content\") pod \"0cd761fb-1832-4045-8a43-66a91e49ce3c\" (UID: \"0cd761fb-1832-4045-8a43-66a91e49ce3c\") " Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.262425 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities" (OuterVolumeSpecName: "utilities") pod "0cd761fb-1832-4045-8a43-66a91e49ce3c" (UID: "0cd761fb-1832-4045-8a43-66a91e49ce3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.266440 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm" (OuterVolumeSpecName: "kube-api-access-vfbmm") pod "0cd761fb-1832-4045-8a43-66a91e49ce3c" (UID: "0cd761fb-1832-4045-8a43-66a91e49ce3c"). InnerVolumeSpecName "kube-api-access-vfbmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.273083 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0cd761fb-1832-4045-8a43-66a91e49ce3c" (UID: "0cd761fb-1832-4045-8a43-66a91e49ce3c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.364164 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.364203 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfbmm\" (UniqueName: \"kubernetes.io/projected/0cd761fb-1832-4045-8a43-66a91e49ce3c-kube-api-access-vfbmm\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.364214 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cd761fb-1832-4045-8a43-66a91e49ce3c-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.379618 4869 scope.go:117] "RemoveContainer" containerID="8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187" Sep 29 14:58:32 crc kubenswrapper[4869]: E0929 14:58:32.380152 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187\": container with ID starting with 8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187 not found: ID does not exist" containerID="8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.380185 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187"} err="failed to get container status \"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187\": rpc error: code = NotFound desc = could not find container \"8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187\": container with ID starting with 8689344bda778a4fe9b045608c885d64688308b5b0274b99c584766671dd0187 not found: ID does not exist" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.380213 4869 scope.go:117] "RemoveContainer" containerID="6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56" Sep 29 14:58:32 crc kubenswrapper[4869]: E0929 14:58:32.380540 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56\": container with ID starting with 6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56 not found: ID does not exist" containerID="6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.380596 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56"} err="failed to get container status \"6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56\": rpc error: code = NotFound desc = could not find container \"6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56\": container with ID starting with 6079204ab2c21a831cd301d619a5de65688e2dc6b2574a6ce5587034e03a4c56 not found: ID does not exist" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.380643 4869 scope.go:117] "RemoveContainer" containerID="8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5" Sep 29 14:58:32 crc 
kubenswrapper[4869]: E0929 14:58:32.380931 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5\": container with ID starting with 8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5 not found: ID does not exist" containerID="8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.380958 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5"} err="failed to get container status \"8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5\": rpc error: code = NotFound desc = could not find container \"8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5\": container with ID starting with 8c3c66d48eaea9775a1eda741fcdc0cf487e06662f885f6ad8c976142f26c3e5 not found: ID does not exist" Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.542108 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:32 crc kubenswrapper[4869]: I0929 14:58:32.550136 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghhfl"] Sep 29 14:58:34 crc kubenswrapper[4869]: I0929 14:58:34.253940 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" path="/var/lib/kubelet/pods/0cd761fb-1832-4045-8a43-66a91e49ce3c/volumes" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.260599 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:58:35 crc kubenswrapper[4869]: E0929 14:58:35.261188 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.646303 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:35 crc kubenswrapper[4869]: E0929 14:58:35.646910 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="registry-server" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.646932 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="registry-server" Sep 29 14:58:35 crc kubenswrapper[4869]: E0929 14:58:35.646962 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="extract-utilities" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.646970 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="extract-utilities" Sep 29 14:58:35 crc kubenswrapper[4869]: E0929 14:58:35.647001 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="extract-content" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.647010 4869 
state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="extract-content" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.647273 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cd761fb-1832-4045-8a43-66a91e49ce3c" containerName="registry-server" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.649501 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.686103 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.752731 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.753421 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25gbk\" (UniqueName: \"kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.753525 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.855907 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.855973 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.856230 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25gbk\" (UniqueName: \"kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.856600 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc 
kubenswrapper[4869]: I0929 14:58:35.856644 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.878461 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25gbk\" (UniqueName: \"kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk\") pod \"certified-operators-7l5nn\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:35 crc kubenswrapper[4869]: I0929 14:58:35.986691 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:36 crc kubenswrapper[4869]: I0929 14:58:36.537978 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:37 crc kubenswrapper[4869]: I0929 14:58:37.285957 4869 generic.go:334] "Generic (PLEG): container finished" podID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerID="b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00" exitCode=0 Sep 29 14:58:37 crc kubenswrapper[4869]: I0929 14:58:37.286044 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerDied","Data":"b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00"} Sep 29 14:58:37 crc kubenswrapper[4869]: I0929 14:58:37.286651 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerStarted","Data":"674ed3165321fa3b9bf1d63ac76814b637a826e37a696cf30efa128bba11b387"} Sep 29 14:58:38 crc kubenswrapper[4869]: I0929 14:58:38.302006 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerStarted","Data":"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340"} Sep 29 14:58:39 crc kubenswrapper[4869]: I0929 14:58:39.313900 4869 generic.go:334] "Generic (PLEG): container finished" podID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerID="034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340" exitCode=0 Sep 29 14:58:39 crc kubenswrapper[4869]: I0929 14:58:39.313974 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerDied","Data":"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340"} Sep 29 14:58:40 crc kubenswrapper[4869]: I0929 14:58:40.333777 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerStarted","Data":"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051"} Sep 29 14:58:40 crc kubenswrapper[4869]: I0929 14:58:40.362979 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7l5nn" podStartSLOduration=2.700309425 podStartE2EDuration="5.362957525s" podCreationTimestamp="2025-09-29 14:58:35 +0000 
UTC" firstStartedPulling="2025-09-29 14:58:37.288279362 +0000 UTC m=+4643.728923692" lastFinishedPulling="2025-09-29 14:58:39.950927462 +0000 UTC m=+4646.391571792" observedRunningTime="2025-09-29 14:58:40.359213527 +0000 UTC m=+4646.799857847" watchObservedRunningTime="2025-09-29 14:58:40.362957525 +0000 UTC m=+4646.803601845" Sep 29 14:58:45 crc kubenswrapper[4869]: I0929 14:58:45.987656 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:45 crc kubenswrapper[4869]: I0929 14:58:45.988225 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:46 crc kubenswrapper[4869]: I0929 14:58:46.649514 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:46 crc kubenswrapper[4869]: I0929 14:58:46.703678 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:46 crc kubenswrapper[4869]: I0929 14:58:46.918914 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:47 crc kubenswrapper[4869]: I0929 14:58:47.242394 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:58:47 crc kubenswrapper[4869]: E0929 14:58:47.244197 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.418896 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7l5nn" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="registry-server" containerID="cri-o://df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051" gracePeriod=2 Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.911073 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.963357 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities\") pod \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.963940 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content\") pod \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.963976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25gbk\" (UniqueName: \"kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk\") pod \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\" (UID: \"b1098f07-c42c-4a05-9b22-68fe024d8a3c\") " Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.964604 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities" (OuterVolumeSpecName: "utilities") pod "b1098f07-c42c-4a05-9b22-68fe024d8a3c" (UID: "b1098f07-c42c-4a05-9b22-68fe024d8a3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.965080 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:48 crc kubenswrapper[4869]: I0929 14:58:48.970565 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk" (OuterVolumeSpecName: "kube-api-access-25gbk") pod "b1098f07-c42c-4a05-9b22-68fe024d8a3c" (UID: "b1098f07-c42c-4a05-9b22-68fe024d8a3c"). InnerVolumeSpecName "kube-api-access-25gbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.076941 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25gbk\" (UniqueName: \"kubernetes.io/projected/b1098f07-c42c-4a05-9b22-68fe024d8a3c-kube-api-access-25gbk\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.096490 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1098f07-c42c-4a05-9b22-68fe024d8a3c" (UID: "b1098f07-c42c-4a05-9b22-68fe024d8a3c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.179657 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1098f07-c42c-4a05-9b22-68fe024d8a3c-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.433783 4869 generic.go:334] "Generic (PLEG): container finished" podID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerID="df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051" exitCode=0 Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.433980 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerDied","Data":"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051"} Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.434122 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7l5nn" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.434981 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7l5nn" event={"ID":"b1098f07-c42c-4a05-9b22-68fe024d8a3c","Type":"ContainerDied","Data":"674ed3165321fa3b9bf1d63ac76814b637a826e37a696cf30efa128bba11b387"} Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.435111 4869 scope.go:117] "RemoveContainer" containerID="df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.459866 4869 scope.go:117] "RemoveContainer" containerID="034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.478905 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.504365 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7l5nn"] Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.517352 4869 scope.go:117] "RemoveContainer" containerID="b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.542852 4869 scope.go:117] "RemoveContainer" containerID="df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051" Sep 29 14:58:49 crc kubenswrapper[4869]: E0929 14:58:49.543497 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051\": container with ID starting with df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051 not found: ID does not exist" containerID="df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.543545 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051"} err="failed to get container status \"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051\": rpc error: code = NotFound desc = could not find container \"df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051\": container with ID starting with df946278978eef660bab2854d0cd8c4dc7bb7790eca52870e2ff6a80cd9ec051 not found: ID does not exist" Sep 29 
14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.543578 4869 scope.go:117] "RemoveContainer" containerID="034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340" Sep 29 14:58:49 crc kubenswrapper[4869]: E0929 14:58:49.544113 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340\": container with ID starting with 034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340 not found: ID does not exist" containerID="034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.544165 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340"} err="failed to get container status \"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340\": rpc error: code = NotFound desc = could not find container \"034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340\": container with ID starting with 034d8ea0cd3ff1230888578db96f99dd5a3d5b6ee9e7cf3a7a8832a2a3fea340 not found: ID does not exist" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.544198 4869 scope.go:117] "RemoveContainer" containerID="b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00" Sep 29 14:58:49 crc kubenswrapper[4869]: E0929 14:58:49.544627 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00\": container with ID starting with b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00 not found: ID does not exist" containerID="b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00" Sep 29 14:58:49 crc kubenswrapper[4869]: I0929 14:58:49.544688 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00"} err="failed to get container status \"b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00\": rpc error: code = NotFound desc = could not find container \"b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00\": container with ID starting with b076628aee91ed5d066e92505bdbe055ca70d79f331dc42b8616859902010a00 not found: ID does not exist" Sep 29 14:58:50 crc kubenswrapper[4869]: I0929 14:58:50.253016 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" path="/var/lib/kubelet/pods/b1098f07-c42c-4a05-9b22-68fe024d8a3c/volumes" Sep 29 14:59:01 crc kubenswrapper[4869]: I0929 14:59:01.243045 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:59:01 crc kubenswrapper[4869]: E0929 14:59:01.244514 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:59:16 crc kubenswrapper[4869]: I0929 14:59:16.242005 4869 scope.go:117] "RemoveContainer" 
containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:59:16 crc kubenswrapper[4869]: E0929 14:59:16.242776 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:59:29 crc kubenswrapper[4869]: I0929 14:59:29.242037 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:59:29 crc kubenswrapper[4869]: E0929 14:59:29.242845 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:59:41 crc kubenswrapper[4869]: I0929 14:59:41.242656 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:59:41 crc kubenswrapper[4869]: E0929 14:59:41.244556 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 14:59:52 crc kubenswrapper[4869]: I0929 14:59:52.243520 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 14:59:53 crc kubenswrapper[4869]: I0929 14:59:53.119295 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926"} Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.180138 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl"] Sep 29 15:00:00 crc kubenswrapper[4869]: E0929 15:00:00.181352 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="extract-content" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.181374 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="extract-content" Sep 29 15:00:00 crc kubenswrapper[4869]: E0929 15:00:00.181410 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="extract-utilities" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.181418 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="extract-utilities" Sep 29 15:00:00 crc kubenswrapper[4869]: E0929 15:00:00.181430 4869 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="registry-server" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.181437 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="registry-server" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.181747 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1098f07-c42c-4a05-9b22-68fe024d8a3c" containerName="registry-server" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.182830 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.186921 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.187096 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.203425 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl"] Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.312564 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.312626 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn22w\" (UniqueName: \"kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.313086 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.416008 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.416091 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn22w\" (UniqueName: \"kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.417120 4869 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.417926 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.422367 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.437444 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn22w\" (UniqueName: \"kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w\") pod \"collect-profiles-29319300-fbtrl\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.516306 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:00 crc kubenswrapper[4869]: I0929 15:00:00.982836 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl"] Sep 29 15:00:01 crc kubenswrapper[4869]: I0929 15:00:01.202503 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" event={"ID":"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1","Type":"ContainerStarted","Data":"ffcb6281a3b40c48326dcc328c95f2f898e745b2d5d480447659d16528094be2"} Sep 29 15:00:01 crc kubenswrapper[4869]: I0929 15:00:01.202568 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" event={"ID":"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1","Type":"ContainerStarted","Data":"88f279945d37b563ebba0c755c335a738568e13ef79c5ed91fd96ed3a472e2c9"} Sep 29 15:00:01 crc kubenswrapper[4869]: I0929 15:00:01.227853 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" podStartSLOduration=1.22782901 podStartE2EDuration="1.22782901s" podCreationTimestamp="2025-09-29 15:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 15:00:01.221141255 +0000 UTC m=+4727.661785585" watchObservedRunningTime="2025-09-29 15:00:01.22782901 +0000 UTC m=+4727.668473330" Sep 29 15:00:02 crc kubenswrapper[4869]: I0929 15:00:02.213786 4869 generic.go:334] "Generic (PLEG): container finished" podID="f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" containerID="ffcb6281a3b40c48326dcc328c95f2f898e745b2d5d480447659d16528094be2" exitCode=0 
Sep 29 15:00:02 crc kubenswrapper[4869]: I0929 15:00:02.213896 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" event={"ID":"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1","Type":"ContainerDied","Data":"ffcb6281a3b40c48326dcc328c95f2f898e745b2d5d480447659d16528094be2"} Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.603712 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.799582 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume\") pod \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.799739 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume\") pod \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.800100 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn22w\" (UniqueName: \"kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w\") pod \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\" (UID: \"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1\") " Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.800276 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume" (OuterVolumeSpecName: "config-volume") pod "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" (UID: "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.800698 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.808864 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" (UID: "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.808936 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w" (OuterVolumeSpecName: "kube-api-access-wn22w") pod "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" (UID: "f51fdb21-b167-4268-a4bf-c8c32bcb2ba1"). InnerVolumeSpecName "kube-api-access-wn22w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.902715 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn22w\" (UniqueName: \"kubernetes.io/projected/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-kube-api-access-wn22w\") on node \"crc\" DevicePath \"\"" Sep 29 15:00:03 crc kubenswrapper[4869]: I0929 15:00:03.902752 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f51fdb21-b167-4268-a4bf-c8c32bcb2ba1-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:00:04 crc kubenswrapper[4869]: I0929 15:00:04.235628 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" event={"ID":"f51fdb21-b167-4268-a4bf-c8c32bcb2ba1","Type":"ContainerDied","Data":"88f279945d37b563ebba0c755c335a738568e13ef79c5ed91fd96ed3a472e2c9"} Sep 29 15:00:04 crc kubenswrapper[4869]: I0929 15:00:04.235861 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88f279945d37b563ebba0c755c335a738568e13ef79c5ed91fd96ed3a472e2c9" Sep 29 15:00:04 crc kubenswrapper[4869]: I0929 15:00:04.235712 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319300-fbtrl" Sep 29 15:00:04 crc kubenswrapper[4869]: I0929 15:00:04.299565 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str"] Sep 29 15:00:04 crc kubenswrapper[4869]: I0929 15:00:04.309487 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319255-x8str"] Sep 29 15:00:06 crc kubenswrapper[4869]: I0929 15:00:06.254334 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f70b334-c864-4221-b25a-fa532dcd6798" path="/var/lib/kubelet/pods/7f70b334-c864-4221-b25a-fa532dcd6798/volumes" Sep 29 15:00:24 crc kubenswrapper[4869]: I0929 15:00:24.935776 4869 scope.go:117] "RemoveContainer" containerID="43797e3ad02dbed3d58c2c0222947b20d291830fca32928d2e996705cb27821d" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.157191 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29319301-ml6vq"] Sep 29 15:01:00 crc kubenswrapper[4869]: E0929 15:01:00.158640 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" containerName="collect-profiles" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.158668 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" containerName="collect-profiles" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.159082 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="f51fdb21-b167-4268-a4bf-c8c32bcb2ba1" containerName="collect-profiles" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.160460 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.172998 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29319301-ml6vq"] Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.291013 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.291174 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.291225 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.291694 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4k5t\" (UniqueName: \"kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.393821 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.393892 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.393953 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4k5t\" (UniqueName: \"kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.394027 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.400431 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.401486 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.401681 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.412713 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4k5t\" (UniqueName: \"kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t\") pod \"keystone-cron-29319301-ml6vq\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.479811 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:00 crc kubenswrapper[4869]: I0929 15:01:00.929336 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29319301-ml6vq"] Sep 29 15:01:01 crc kubenswrapper[4869]: W0929 15:01:01.300409 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ae04f1c_5944_43ce_a66d_2e04f96cc301.slice/crio-2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865 WatchSource:0}: Error finding container 2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865: Status 404 returned error can't find the container with id 2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865 Sep 29 15:01:01 crc kubenswrapper[4869]: I0929 15:01:01.835660 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29319301-ml6vq" event={"ID":"7ae04f1c-5944-43ce-a66d-2e04f96cc301","Type":"ContainerStarted","Data":"9db600a22857f0192764bf023dfdbfa3b192ec5761f454680be174392937d234"} Sep 29 15:01:01 crc kubenswrapper[4869]: I0929 15:01:01.836207 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29319301-ml6vq" event={"ID":"7ae04f1c-5944-43ce-a66d-2e04f96cc301","Type":"ContainerStarted","Data":"2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865"} Sep 29 15:01:01 crc kubenswrapper[4869]: I0929 15:01:01.862433 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29319301-ml6vq" podStartSLOduration=1.862408924 podStartE2EDuration="1.862408924s" podCreationTimestamp="2025-09-29 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 15:01:01.857994549 +0000 UTC m=+4788.298638879" watchObservedRunningTime="2025-09-29 15:01:01.862408924 +0000 UTC m=+4788.303053244" Sep 29 15:01:05 crc kubenswrapper[4869]: I0929 15:01:05.877576 4869 
generic.go:334] "Generic (PLEG): container finished" podID="7ae04f1c-5944-43ce-a66d-2e04f96cc301" containerID="9db600a22857f0192764bf023dfdbfa3b192ec5761f454680be174392937d234" exitCode=0 Sep 29 15:01:05 crc kubenswrapper[4869]: I0929 15:01:05.877684 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29319301-ml6vq" event={"ID":"7ae04f1c-5944-43ce-a66d-2e04f96cc301","Type":"ContainerDied","Data":"9db600a22857f0192764bf023dfdbfa3b192ec5761f454680be174392937d234"} Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.276182 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.354424 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle\") pod \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.354861 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4k5t\" (UniqueName: \"kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t\") pod \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.355022 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data\") pod \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.355145 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys\") pod \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\" (UID: \"7ae04f1c-5944-43ce-a66d-2e04f96cc301\") " Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.364347 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7ae04f1c-5944-43ce-a66d-2e04f96cc301" (UID: "7ae04f1c-5944-43ce-a66d-2e04f96cc301"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.364542 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t" (OuterVolumeSpecName: "kube-api-access-q4k5t") pod "7ae04f1c-5944-43ce-a66d-2e04f96cc301" (UID: "7ae04f1c-5944-43ce-a66d-2e04f96cc301"). InnerVolumeSpecName "kube-api-access-q4k5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.387558 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ae04f1c-5944-43ce-a66d-2e04f96cc301" (UID: "7ae04f1c-5944-43ce-a66d-2e04f96cc301"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.412905 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data" (OuterVolumeSpecName: "config-data") pod "7ae04f1c-5944-43ce-a66d-2e04f96cc301" (UID: "7ae04f1c-5944-43ce-a66d-2e04f96cc301"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.458121 4869 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.458219 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4k5t\" (UniqueName: \"kubernetes.io/projected/7ae04f1c-5944-43ce-a66d-2e04f96cc301-kube-api-access-q4k5t\") on node \"crc\" DevicePath \"\"" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.458234 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-config-data\") on node \"crc\" DevicePath \"\"" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.458242 4869 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7ae04f1c-5944-43ce-a66d-2e04f96cc301-fernet-keys\") on node \"crc\" DevicePath \"\"" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.901305 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29319301-ml6vq" event={"ID":"7ae04f1c-5944-43ce-a66d-2e04f96cc301","Type":"ContainerDied","Data":"2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865"} Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.901362 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2127e01c333bd2f59f5e4cbe94101b7c8a77c5153d626bfe91f9a231d8e9d865" Sep 29 15:01:07 crc kubenswrapper[4869]: I0929 15:01:07.901381 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29319301-ml6vq" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.745943 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:19 crc kubenswrapper[4869]: E0929 15:02:19.748341 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ae04f1c-5944-43ce-a66d-2e04f96cc301" containerName="keystone-cron" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.748464 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ae04f1c-5944-43ce-a66d-2e04f96cc301" containerName="keystone-cron" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.748749 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ae04f1c-5944-43ce-a66d-2e04f96cc301" containerName="keystone-cron" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.750706 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.761240 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.903398 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.903670 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4hp5\" (UniqueName: \"kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:19 crc kubenswrapper[4869]: I0929 15:02:19.903766 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.006220 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.006277 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4hp5\" (UniqueName: \"kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.006332 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.006872 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.006900 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.028406 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-p4hp5\" (UniqueName: \"kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5\") pod \"redhat-operators-zh29p\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.070077 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.632294 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.657214 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:02:20 crc kubenswrapper[4869]: I0929 15:02:20.657290 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:02:21 crc kubenswrapper[4869]: I0929 15:02:21.621563 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerID="8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032" exitCode=0 Sep 29 15:02:21 crc kubenswrapper[4869]: I0929 15:02:21.621687 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerDied","Data":"8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032"} Sep 29 15:02:21 crc kubenswrapper[4869]: I0929 15:02:21.622050 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerStarted","Data":"0444befdea4491816698c1334f6cb4afcc962745d44bf82dc156969040e21abe"} Sep 29 15:02:22 crc kubenswrapper[4869]: I0929 15:02:22.635243 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerStarted","Data":"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259"} Sep 29 15:02:24 crc kubenswrapper[4869]: I0929 15:02:24.654442 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerID="914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259" exitCode=0 Sep 29 15:02:24 crc kubenswrapper[4869]: I0929 15:02:24.656488 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerDied","Data":"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259"} Sep 29 15:02:25 crc kubenswrapper[4869]: I0929 15:02:25.668652 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerStarted","Data":"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da"} Sep 29 15:02:25 crc kubenswrapper[4869]: I0929 15:02:25.692261 4869 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zh29p" podStartSLOduration=3.140101973 podStartE2EDuration="6.692237431s" podCreationTimestamp="2025-09-29 15:02:19 +0000 UTC" firstStartedPulling="2025-09-29 15:02:21.623906327 +0000 UTC m=+4868.064550657" lastFinishedPulling="2025-09-29 15:02:25.176041775 +0000 UTC m=+4871.616686115" observedRunningTime="2025-09-29 15:02:25.688460932 +0000 UTC m=+4872.129105252" watchObservedRunningTime="2025-09-29 15:02:25.692237431 +0000 UTC m=+4872.132881751" Sep 29 15:02:30 crc kubenswrapper[4869]: I0929 15:02:30.071477 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:30 crc kubenswrapper[4869]: I0929 15:02:30.072210 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:30 crc kubenswrapper[4869]: I0929 15:02:30.128471 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:30 crc kubenswrapper[4869]: I0929 15:02:30.781855 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:30 crc kubenswrapper[4869]: I0929 15:02:30.838483 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:32 crc kubenswrapper[4869]: I0929 15:02:32.750644 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zh29p" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="registry-server" containerID="cri-o://4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da" gracePeriod=2 Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.259815 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.367716 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content\") pod \"e6af4d73-f96a-43fc-b397-cc786c200a7b\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.367942 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities\") pod \"e6af4d73-f96a-43fc-b397-cc786c200a7b\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.368074 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4hp5\" (UniqueName: \"kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5\") pod \"e6af4d73-f96a-43fc-b397-cc786c200a7b\" (UID: \"e6af4d73-f96a-43fc-b397-cc786c200a7b\") " Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.368934 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities" (OuterVolumeSpecName: "utilities") pod "e6af4d73-f96a-43fc-b397-cc786c200a7b" (UID: "e6af4d73-f96a-43fc-b397-cc786c200a7b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.376127 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5" (OuterVolumeSpecName: "kube-api-access-p4hp5") pod "e6af4d73-f96a-43fc-b397-cc786c200a7b" (UID: "e6af4d73-f96a-43fc-b397-cc786c200a7b"). InnerVolumeSpecName "kube-api-access-p4hp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.454679 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6af4d73-f96a-43fc-b397-cc786c200a7b" (UID: "e6af4d73-f96a-43fc-b397-cc786c200a7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.470937 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.470980 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4hp5\" (UniqueName: \"kubernetes.io/projected/e6af4d73-f96a-43fc-b397-cc786c200a7b-kube-api-access-p4hp5\") on node \"crc\" DevicePath \"\"" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.470993 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6af4d73-f96a-43fc-b397-cc786c200a7b-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.762061 4869 generic.go:334] "Generic (PLEG): container finished" podID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerID="4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da" exitCode=0 Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.762109 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerDied","Data":"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da"} Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.762139 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zh29p" event={"ID":"e6af4d73-f96a-43fc-b397-cc786c200a7b","Type":"ContainerDied","Data":"0444befdea4491816698c1334f6cb4afcc962745d44bf82dc156969040e21abe"} Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.762159 4869 scope.go:117] "RemoveContainer" containerID="4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.762309 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zh29p" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.807958 4869 scope.go:117] "RemoveContainer" containerID="914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.809409 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.820877 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zh29p"] Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.834527 4869 scope.go:117] "RemoveContainer" containerID="8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.890988 4869 scope.go:117] "RemoveContainer" containerID="4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da" Sep 29 15:02:33 crc kubenswrapper[4869]: E0929 15:02:33.891503 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da\": container with ID starting with 4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da not found: ID does not exist" containerID="4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.891535 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da"} err="failed to get container status \"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da\": rpc error: code = NotFound desc = could not find container \"4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da\": container with ID starting with 4b0f2d85044592bbf2d74ef7c2ece918c45129e25ee956dad47f15c89781e3da not found: ID does not exist" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.891560 4869 scope.go:117] "RemoveContainer" containerID="914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259" Sep 29 15:02:33 crc kubenswrapper[4869]: E0929 15:02:33.892032 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259\": container with ID starting with 914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259 not found: ID does not exist" containerID="914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.892057 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259"} err="failed to get container status \"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259\": rpc error: code = NotFound desc = could not find container \"914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259\": container with ID starting with 914465923b0a9bbdeff8ce05238d6bdeeb48e58ca59694b8d07d974ebdccb259 not found: ID does not exist" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.892071 4869 scope.go:117] "RemoveContainer" containerID="8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032" Sep 29 15:02:33 crc kubenswrapper[4869]: E0929 15:02:33.892297 4869 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032\": container with ID starting with 8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032 not found: ID does not exist" containerID="8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032" Sep 29 15:02:33 crc kubenswrapper[4869]: I0929 15:02:33.892379 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032"} err="failed to get container status \"8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032\": rpc error: code = NotFound desc = could not find container \"8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032\": container with ID starting with 8998ccba3cba3638940f5caf8175caefdf4cab07e0a9f7d943a63f495046c032 not found: ID does not exist" Sep 29 15:02:34 crc kubenswrapper[4869]: I0929 15:02:34.255558 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" path="/var/lib/kubelet/pods/e6af4d73-f96a-43fc-b397-cc786c200a7b/volumes" Sep 29 15:02:50 crc kubenswrapper[4869]: I0929 15:02:50.657493 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:02:50 crc kubenswrapper[4869]: I0929 15:02:50.658547 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:03:20 crc kubenswrapper[4869]: I0929 15:03:20.657028 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:03:20 crc kubenswrapper[4869]: I0929 15:03:20.657725 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:03:20 crc kubenswrapper[4869]: I0929 15:03:20.657794 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 15:03:20 crc kubenswrapper[4869]: I0929 15:03:20.658876 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 15:03:20 crc kubenswrapper[4869]: I0929 15:03:20.658950 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" 
podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926" gracePeriod=600 Sep 29 15:03:21 crc kubenswrapper[4869]: I0929 15:03:21.252980 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926" exitCode=0 Sep 29 15:03:21 crc kubenswrapper[4869]: I0929 15:03:21.253081 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926"} Sep 29 15:03:21 crc kubenswrapper[4869]: I0929 15:03:21.253311 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6"} Sep 29 15:03:21 crc kubenswrapper[4869]: I0929 15:03:21.253338 4869 scope.go:117] "RemoveContainer" containerID="65d3631581b8be2dcd0e196251ccc5e65fac6e81a327b99437c5aa8be788fe39" Sep 29 15:05:20 crc kubenswrapper[4869]: I0929 15:05:20.657150 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:05:20 crc kubenswrapper[4869]: I0929 15:05:20.657660 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:05:50 crc kubenswrapper[4869]: I0929 15:05:50.657490 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:05:50 crc kubenswrapper[4869]: I0929 15:05:50.658114 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:06:20 crc kubenswrapper[4869]: I0929 15:06:20.657538 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:06:20 crc kubenswrapper[4869]: I0929 15:06:20.658154 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:06:20 
crc kubenswrapper[4869]: I0929 15:06:20.658224 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 15:06:20 crc kubenswrapper[4869]: I0929 15:06:20.659315 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 15:06:20 crc kubenswrapper[4869]: I0929 15:06:20.659397 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" gracePeriod=600 Sep 29 15:06:20 crc kubenswrapper[4869]: E0929 15:06:20.783304 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:06:21 crc kubenswrapper[4869]: I0929 15:06:21.172599 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" exitCode=0 Sep 29 15:06:21 crc kubenswrapper[4869]: I0929 15:06:21.172653 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6"} Sep 29 15:06:21 crc kubenswrapper[4869]: I0929 15:06:21.172706 4869 scope.go:117] "RemoveContainer" containerID="d029a17436a6b6c78d99602e949e766185d1288952f3500ecd4e16464b5ff926" Sep 29 15:06:21 crc kubenswrapper[4869]: I0929 15:06:21.173402 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:06:21 crc kubenswrapper[4869]: E0929 15:06:21.173963 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:06:32 crc kubenswrapper[4869]: I0929 15:06:32.243266 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:06:32 crc kubenswrapper[4869]: E0929 15:06:32.246289 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:06:46 crc kubenswrapper[4869]: I0929 15:06:46.242473 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:06:46 crc kubenswrapper[4869]: E0929 15:06:46.244497 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:07:01 crc kubenswrapper[4869]: I0929 15:07:01.242945 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:07:01 crc kubenswrapper[4869]: E0929 15:07:01.243741 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:07:12 crc kubenswrapper[4869]: I0929 15:07:12.242120 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:07:12 crc kubenswrapper[4869]: E0929 15:07:12.243063 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:07:23 crc kubenswrapper[4869]: I0929 15:07:23.242227 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:07:23 crc kubenswrapper[4869]: E0929 15:07:23.243100 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:07:38 crc kubenswrapper[4869]: I0929 15:07:38.242080 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:07:38 crc kubenswrapper[4869]: E0929 15:07:38.242992 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:07:51 crc kubenswrapper[4869]: I0929 15:07:51.243007 4869 
scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:07:51 crc kubenswrapper[4869]: E0929 15:07:51.244170 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:03 crc kubenswrapper[4869]: I0929 15:08:03.242301 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:08:03 crc kubenswrapper[4869]: E0929 15:08:03.243247 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:17 crc kubenswrapper[4869]: I0929 15:08:17.242151 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:08:17 crc kubenswrapper[4869]: E0929 15:08:17.243589 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:30 crc kubenswrapper[4869]: I0929 15:08:30.242275 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:08:30 crc kubenswrapper[4869]: E0929 15:08:30.243177 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:43 crc kubenswrapper[4869]: I0929 15:08:43.242436 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:08:43 crc kubenswrapper[4869]: E0929 15:08:43.243213 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.948497 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:08:56 crc kubenswrapper[4869]: E0929 15:08:56.949557 4869 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="extract-utilities" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.949578 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="extract-utilities" Sep 29 15:08:56 crc kubenswrapper[4869]: E0929 15:08:56.949604 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="registry-server" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.949632 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="registry-server" Sep 29 15:08:56 crc kubenswrapper[4869]: E0929 15:08:56.949677 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="extract-content" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.949685 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="extract-content" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.949918 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6af4d73-f96a-43fc-b397-cc786c200a7b" containerName="registry-server" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.951892 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:56 crc kubenswrapper[4869]: I0929 15:08:56.976134 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.039351 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.039436 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27qcz\" (UniqueName: \"kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.039493 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.141572 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.141951 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27qcz\" (UniqueName: 
\"kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.142080 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.142562 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.142906 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.161353 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27qcz\" (UniqueName: \"kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz\") pod \"redhat-marketplace-qx6n9\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.242203 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:08:57 crc kubenswrapper[4869]: E0929 15:08:57.242490 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.272510 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:08:57 crc kubenswrapper[4869]: I0929 15:08:57.861134 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:08:57 crc kubenswrapper[4869]: W0929 15:08:57.867900 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a7ef332_c4f3_4183_a163_9674cc8c4a8c.slice/crio-66effacc1a5f162c89242f9f60b3ded8ba83f9cb868e1d0096ebeea5d4aa7422 WatchSource:0}: Error finding container 66effacc1a5f162c89242f9f60b3ded8ba83f9cb868e1d0096ebeea5d4aa7422: Status 404 returned error can't find the container with id 66effacc1a5f162c89242f9f60b3ded8ba83f9cb868e1d0096ebeea5d4aa7422 Sep 29 15:08:58 crc kubenswrapper[4869]: I0929 15:08:58.821730 4869 generic.go:334] "Generic (PLEG): container finished" podID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerID="2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996" exitCode=0 Sep 29 15:08:58 crc kubenswrapper[4869]: I0929 15:08:58.821819 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerDied","Data":"2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996"} Sep 29 15:08:58 crc kubenswrapper[4869]: I0929 15:08:58.822034 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerStarted","Data":"66effacc1a5f162c89242f9f60b3ded8ba83f9cb868e1d0096ebeea5d4aa7422"} Sep 29 15:08:58 crc kubenswrapper[4869]: I0929 15:08:58.824593 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 15:08:59 crc kubenswrapper[4869]: I0929 15:08:59.833875 4869 generic.go:334] "Generic (PLEG): container finished" podID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerID="accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb" exitCode=0 Sep 29 15:08:59 crc kubenswrapper[4869]: I0929 15:08:59.834002 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerDied","Data":"accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb"} Sep 29 15:09:00 crc kubenswrapper[4869]: I0929 15:09:00.847087 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerStarted","Data":"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160"} Sep 29 15:09:00 crc kubenswrapper[4869]: I0929 15:09:00.869656 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qx6n9" podStartSLOduration=3.467847987 podStartE2EDuration="4.869635004s" podCreationTimestamp="2025-09-29 15:08:56 +0000 UTC" firstStartedPulling="2025-09-29 15:08:58.824255903 +0000 UTC m=+5265.264900233" lastFinishedPulling="2025-09-29 15:09:00.22604293 +0000 UTC m=+5266.666687250" observedRunningTime="2025-09-29 15:09:00.865774082 +0000 UTC m=+5267.306418402" watchObservedRunningTime="2025-09-29 15:09:00.869635004 +0000 UTC m=+5267.310279324" Sep 29 15:09:07 crc kubenswrapper[4869]: I0929 15:09:07.272679 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:07 crc kubenswrapper[4869]: I0929 15:09:07.273244 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:07 crc kubenswrapper[4869]: I0929 15:09:07.322249 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:07 crc kubenswrapper[4869]: I0929 15:09:07.992415 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:08 crc kubenswrapper[4869]: I0929 15:09:08.051246 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:09:09 crc kubenswrapper[4869]: I0929 15:09:09.945104 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qx6n9" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="registry-server" containerID="cri-o://e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160" gracePeriod=2 Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.495746 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.685719 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27qcz\" (UniqueName: \"kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz\") pod \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.685976 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities\") pod \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.686049 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content\") pod \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\" (UID: \"4a7ef332-c4f3-4183-a163-9674cc8c4a8c\") " Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.687100 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities" (OuterVolumeSpecName: "utilities") pod "4a7ef332-c4f3-4183-a163-9674cc8c4a8c" (UID: "4a7ef332-c4f3-4183-a163-9674cc8c4a8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.692925 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz" (OuterVolumeSpecName: "kube-api-access-27qcz") pod "4a7ef332-c4f3-4183-a163-9674cc8c4a8c" (UID: "4a7ef332-c4f3-4183-a163-9674cc8c4a8c"). InnerVolumeSpecName "kube-api-access-27qcz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.704419 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a7ef332-c4f3-4183-a163-9674cc8c4a8c" (UID: "4a7ef332-c4f3-4183-a163-9674cc8c4a8c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.788537 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.788575 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.788586 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27qcz\" (UniqueName: \"kubernetes.io/projected/4a7ef332-c4f3-4183-a163-9674cc8c4a8c-kube-api-access-27qcz\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.957735 4869 generic.go:334] "Generic (PLEG): container finished" podID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerID="e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160" exitCode=0 Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.957786 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerDied","Data":"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160"} Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.957798 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qx6n9" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.957820 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qx6n9" event={"ID":"4a7ef332-c4f3-4183-a163-9674cc8c4a8c","Type":"ContainerDied","Data":"66effacc1a5f162c89242f9f60b3ded8ba83f9cb868e1d0096ebeea5d4aa7422"} Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.957846 4869 scope.go:117] "RemoveContainer" containerID="e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160" Sep 29 15:09:10 crc kubenswrapper[4869]: I0929 15:09:10.983378 4869 scope.go:117] "RemoveContainer" containerID="accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.030145 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.043973 4869 scope.go:117] "RemoveContainer" containerID="2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.050828 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qx6n9"] Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.065816 4869 scope.go:117] "RemoveContainer" containerID="e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160" Sep 29 15:09:11 crc kubenswrapper[4869]: E0929 15:09:11.066469 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160\": container with ID starting with e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160 not found: ID does not exist" containerID="e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.066517 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160"} err="failed to get container status \"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160\": rpc error: code = NotFound desc = could not find container \"e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160\": container with ID starting with e501222d6eb2d68d3ba8ce7dadbd43ac6a107d746579f493d9b606c3d6169160 not found: ID does not exist" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.066551 4869 scope.go:117] "RemoveContainer" containerID="accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb" Sep 29 15:09:11 crc kubenswrapper[4869]: E0929 15:09:11.066877 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb\": container with ID starting with accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb not found: ID does not exist" containerID="accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.066930 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb"} err="failed to get container status \"accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb\": rpc error: code = NotFound desc = could not find 
container \"accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb\": container with ID starting with accab92cc71200b42dfaf3effff93c478cd31923d7edf412ab6e10ffc66bc6bb not found: ID does not exist" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.066966 4869 scope.go:117] "RemoveContainer" containerID="2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996" Sep 29 15:09:11 crc kubenswrapper[4869]: E0929 15:09:11.067322 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996\": container with ID starting with 2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996 not found: ID does not exist" containerID="2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996" Sep 29 15:09:11 crc kubenswrapper[4869]: I0929 15:09:11.067382 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996"} err="failed to get container status \"2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996\": rpc error: code = NotFound desc = could not find container \"2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996\": container with ID starting with 2ec9c59703c223b6cb691777d72971856b41ac758903e40c6f73c1a831eec996 not found: ID does not exist" Sep 29 15:09:12 crc kubenswrapper[4869]: I0929 15:09:12.242440 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:09:12 crc kubenswrapper[4869]: E0929 15:09:12.242868 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:09:12 crc kubenswrapper[4869]: I0929 15:09:12.257516 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" path="/var/lib/kubelet/pods/4a7ef332-c4f3-4183-a163-9674cc8c4a8c/volumes" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.533170 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:24 crc kubenswrapper[4869]: E0929 15:09:24.534245 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="extract-content" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.534266 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="extract-content" Sep 29 15:09:24 crc kubenswrapper[4869]: E0929 15:09:24.534295 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="extract-utilities" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.534307 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="extract-utilities" Sep 29 15:09:24 crc kubenswrapper[4869]: E0929 15:09:24.534369 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="registry-server" Sep 29 15:09:24 
crc kubenswrapper[4869]: I0929 15:09:24.534379 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="registry-server" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.534678 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a7ef332-c4f3-4183-a163-9674cc8c4a8c" containerName="registry-server" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.536912 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.545941 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.715321 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.715424 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.715529 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4pvd\" (UniqueName: \"kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.817747 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.818207 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4pvd\" (UniqueName: \"kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.818416 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.818516 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " 
pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.819032 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.844838 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4pvd\" (UniqueName: \"kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd\") pod \"certified-operators-wptxp\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:24 crc kubenswrapper[4869]: I0929 15:09:24.873415 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:25 crc kubenswrapper[4869]: I0929 15:09:25.243490 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:09:25 crc kubenswrapper[4869]: E0929 15:09:25.244141 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:09:25 crc kubenswrapper[4869]: I0929 15:09:25.434241 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:26 crc kubenswrapper[4869]: I0929 15:09:26.164081 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerStarted","Data":"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0"} Sep 29 15:09:26 crc kubenswrapper[4869]: I0929 15:09:26.164413 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerStarted","Data":"3c3f6062308bd44da98d040f10942875cb1b5badd9038d4989fc129182d0f1b5"} Sep 29 15:09:27 crc kubenswrapper[4869]: I0929 15:09:27.175644 4869 generic.go:334] "Generic (PLEG): container finished" podID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerID="b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0" exitCode=0 Sep 29 15:09:27 crc kubenswrapper[4869]: I0929 15:09:27.175730 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerDied","Data":"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0"} Sep 29 15:09:29 crc kubenswrapper[4869]: I0929 15:09:29.199389 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerStarted","Data":"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2"} Sep 29 15:09:33 crc kubenswrapper[4869]: I0929 15:09:33.246214 4869 generic.go:334] "Generic (PLEG): container finished" 
podID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerID="69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2" exitCode=0 Sep 29 15:09:33 crc kubenswrapper[4869]: I0929 15:09:33.246316 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerDied","Data":"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2"} Sep 29 15:09:35 crc kubenswrapper[4869]: I0929 15:09:35.275541 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerStarted","Data":"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa"} Sep 29 15:09:35 crc kubenswrapper[4869]: I0929 15:09:35.299341 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wptxp" podStartSLOduration=4.3079714639999995 podStartE2EDuration="11.299319257s" podCreationTimestamp="2025-09-29 15:09:24 +0000 UTC" firstStartedPulling="2025-09-29 15:09:27.178182415 +0000 UTC m=+5293.618826745" lastFinishedPulling="2025-09-29 15:09:34.169530218 +0000 UTC m=+5300.610174538" observedRunningTime="2025-09-29 15:09:35.295533747 +0000 UTC m=+5301.736178057" watchObservedRunningTime="2025-09-29 15:09:35.299319257 +0000 UTC m=+5301.739963577" Sep 29 15:09:40 crc kubenswrapper[4869]: I0929 15:09:40.242409 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:09:40 crc kubenswrapper[4869]: E0929 15:09:40.243215 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:09:44 crc kubenswrapper[4869]: I0929 15:09:44.873531 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:44 crc kubenswrapper[4869]: I0929 15:09:44.874118 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:44 crc kubenswrapper[4869]: I0929 15:09:44.928632 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:45 crc kubenswrapper[4869]: I0929 15:09:45.423488 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:45 crc kubenswrapper[4869]: I0929 15:09:45.475946 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.393722 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wptxp" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="registry-server" containerID="cri-o://9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa" gracePeriod=2 Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.852794 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.991319 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content\") pod \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.991510 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities\") pod \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.991684 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4pvd\" (UniqueName: \"kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd\") pod \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\" (UID: \"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0\") " Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.992082 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities" (OuterVolumeSpecName: "utilities") pod "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" (UID: "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.992384 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:47 crc kubenswrapper[4869]: I0929 15:09:47.998111 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd" (OuterVolumeSpecName: "kube-api-access-p4pvd") pod "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" (UID: "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0"). InnerVolumeSpecName "kube-api-access-p4pvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.037327 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" (UID: "aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.095291 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.096374 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4pvd\" (UniqueName: \"kubernetes.io/projected/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0-kube-api-access-p4pvd\") on node \"crc\" DevicePath \"\"" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.407575 4869 generic.go:334] "Generic (PLEG): container finished" podID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerID="9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa" exitCode=0 Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.407667 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wptxp" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.407665 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerDied","Data":"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa"} Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.407763 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wptxp" event={"ID":"aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0","Type":"ContainerDied","Data":"3c3f6062308bd44da98d040f10942875cb1b5badd9038d4989fc129182d0f1b5"} Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.407785 4869 scope.go:117] "RemoveContainer" containerID="9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.431623 4869 scope.go:117] "RemoveContainer" containerID="69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.438353 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.448896 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wptxp"] Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.467811 4869 scope.go:117] "RemoveContainer" containerID="b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.525381 4869 scope.go:117] "RemoveContainer" containerID="9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa" Sep 29 15:09:48 crc kubenswrapper[4869]: E0929 15:09:48.526124 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa\": container with ID starting with 9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa not found: ID does not exist" containerID="9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.526224 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa"} err="failed to get container status 
\"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa\": rpc error: code = NotFound desc = could not find container \"9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa\": container with ID starting with 9556a3877e15ac55aa5024d5c378f574a1375469e3c334950a578b0c60f69faa not found: ID does not exist" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.526267 4869 scope.go:117] "RemoveContainer" containerID="69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2" Sep 29 15:09:48 crc kubenswrapper[4869]: E0929 15:09:48.526741 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2\": container with ID starting with 69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2 not found: ID does not exist" containerID="69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.526782 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2"} err="failed to get container status \"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2\": rpc error: code = NotFound desc = could not find container \"69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2\": container with ID starting with 69aa289d0d9a9355b11cdd82770e1117cd8d2dbd566c98237cb0109388da99f2 not found: ID does not exist" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.526813 4869 scope.go:117] "RemoveContainer" containerID="b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0" Sep 29 15:09:48 crc kubenswrapper[4869]: E0929 15:09:48.527402 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0\": container with ID starting with b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0 not found: ID does not exist" containerID="b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0" Sep 29 15:09:48 crc kubenswrapper[4869]: I0929 15:09:48.527434 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0"} err="failed to get container status \"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0\": rpc error: code = NotFound desc = could not find container \"b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0\": container with ID starting with b444e8119cf6a8f7044bf23a9ed142493e3e3cf2498df918078e9f2ed99ab9a0 not found: ID does not exist" Sep 29 15:09:50 crc kubenswrapper[4869]: I0929 15:09:50.253156 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" path="/var/lib/kubelet/pods/aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0/volumes" Sep 29 15:09:51 crc kubenswrapper[4869]: I0929 15:09:51.242248 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:09:51 crc kubenswrapper[4869]: E0929 15:09:51.242867 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:10:03 crc kubenswrapper[4869]: I0929 15:10:03.243392 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:10:03 crc kubenswrapper[4869]: E0929 15:10:03.244781 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:10:15 crc kubenswrapper[4869]: I0929 15:10:15.242352 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:10:15 crc kubenswrapper[4869]: E0929 15:10:15.243073 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:10:26 crc kubenswrapper[4869]: I0929 15:10:26.242524 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:10:26 crc kubenswrapper[4869]: E0929 15:10:26.243754 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:10:40 crc kubenswrapper[4869]: I0929 15:10:40.241803 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:10:40 crc kubenswrapper[4869]: E0929 15:10:40.242599 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:10:52 crc kubenswrapper[4869]: I0929 15:10:52.242128 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:10:52 crc kubenswrapper[4869]: E0929 15:10:52.243045 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" 
podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:11:03 crc kubenswrapper[4869]: I0929 15:11:03.242982 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:11:03 crc kubenswrapper[4869]: E0929 15:11:03.243929 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.528075 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:05 crc kubenswrapper[4869]: E0929 15:11:05.531461 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="registry-server" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.531600 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="registry-server" Sep 29 15:11:05 crc kubenswrapper[4869]: E0929 15:11:05.531708 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="extract-utilities" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.531789 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="extract-utilities" Sep 29 15:11:05 crc kubenswrapper[4869]: E0929 15:11:05.531898 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="extract-content" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.531979 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="extract-content" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.532423 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaf862bf-5bc4-4a7b-aeda-e0e5f8acc3c0" containerName="registry-server" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.535368 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.541411 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.609603 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9g9j\" (UniqueName: \"kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.609776 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.610003 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.712108 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9g9j\" (UniqueName: \"kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.712482 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.712568 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.713180 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.713186 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.735134 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p9g9j\" (UniqueName: \"kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j\") pod \"community-operators-jkrwj\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:05 crc kubenswrapper[4869]: I0929 15:11:05.862337 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:06 crc kubenswrapper[4869]: I0929 15:11:06.433753 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:07 crc kubenswrapper[4869]: I0929 15:11:07.237749 4869 generic.go:334] "Generic (PLEG): container finished" podID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerID="7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52" exitCode=0 Sep 29 15:11:07 crc kubenswrapper[4869]: I0929 15:11:07.237811 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerDied","Data":"7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52"} Sep 29 15:11:07 crc kubenswrapper[4869]: I0929 15:11:07.238314 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerStarted","Data":"4683a29da04c35c8f2981285720997e068527a100732996d676d53667b49bd52"} Sep 29 15:11:09 crc kubenswrapper[4869]: I0929 15:11:09.262241 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerStarted","Data":"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd"} Sep 29 15:11:10 crc kubenswrapper[4869]: I0929 15:11:10.274545 4869 generic.go:334] "Generic (PLEG): container finished" podID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerID="c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd" exitCode=0 Sep 29 15:11:10 crc kubenswrapper[4869]: I0929 15:11:10.274592 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerDied","Data":"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd"} Sep 29 15:11:11 crc kubenswrapper[4869]: I0929 15:11:11.287632 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerStarted","Data":"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c"} Sep 29 15:11:11 crc kubenswrapper[4869]: I0929 15:11:11.308493 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jkrwj" podStartSLOduration=2.552270474 podStartE2EDuration="6.308472068s" podCreationTimestamp="2025-09-29 15:11:05 +0000 UTC" firstStartedPulling="2025-09-29 15:11:07.24062683 +0000 UTC m=+5393.681271140" lastFinishedPulling="2025-09-29 15:11:10.996828414 +0000 UTC m=+5397.437472734" observedRunningTime="2025-09-29 15:11:11.305598422 +0000 UTC m=+5397.746242742" watchObservedRunningTime="2025-09-29 15:11:11.308472068 +0000 UTC m=+5397.749116388" Sep 29 15:11:15 crc kubenswrapper[4869]: I0929 15:11:15.864051 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:15 crc kubenswrapper[4869]: I0929 15:11:15.864662 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:15 crc kubenswrapper[4869]: I0929 15:11:15.929757 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:16 crc kubenswrapper[4869]: I0929 15:11:16.434775 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:16 crc kubenswrapper[4869]: I0929 15:11:16.491554 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:18 crc kubenswrapper[4869]: I0929 15:11:18.243006 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:11:18 crc kubenswrapper[4869]: E0929 15:11:18.243530 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:11:18 crc kubenswrapper[4869]: I0929 15:11:18.375102 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jkrwj" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="registry-server" containerID="cri-o://7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c" gracePeriod=2 Sep 29 15:11:18 crc kubenswrapper[4869]: I0929 15:11:18.951674 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.029880 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9g9j\" (UniqueName: \"kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j\") pod \"386517a6-babc-4451-abcf-6a97ccbaacf2\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.030053 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content\") pod \"386517a6-babc-4451-abcf-6a97ccbaacf2\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.030196 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities\") pod \"386517a6-babc-4451-abcf-6a97ccbaacf2\" (UID: \"386517a6-babc-4451-abcf-6a97ccbaacf2\") " Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.031225 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities" (OuterVolumeSpecName: "utilities") pod "386517a6-babc-4451-abcf-6a97ccbaacf2" (UID: "386517a6-babc-4451-abcf-6a97ccbaacf2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.036232 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j" (OuterVolumeSpecName: "kube-api-access-p9g9j") pod "386517a6-babc-4451-abcf-6a97ccbaacf2" (UID: "386517a6-babc-4451-abcf-6a97ccbaacf2"). InnerVolumeSpecName "kube-api-access-p9g9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.083544 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "386517a6-babc-4451-abcf-6a97ccbaacf2" (UID: "386517a6-babc-4451-abcf-6a97ccbaacf2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.134061 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.134117 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386517a6-babc-4451-abcf-6a97ccbaacf2-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.134128 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9g9j\" (UniqueName: \"kubernetes.io/projected/386517a6-babc-4451-abcf-6a97ccbaacf2-kube-api-access-p9g9j\") on node \"crc\" DevicePath \"\"" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.392747 4869 generic.go:334] "Generic (PLEG): container finished" podID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerID="7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c" exitCode=0 Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.392836 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerDied","Data":"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c"} Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.392912 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkrwj" event={"ID":"386517a6-babc-4451-abcf-6a97ccbaacf2","Type":"ContainerDied","Data":"4683a29da04c35c8f2981285720997e068527a100732996d676d53667b49bd52"} Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.392961 4869 scope.go:117] "RemoveContainer" containerID="7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.392986 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkrwj" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.421974 4869 scope.go:117] "RemoveContainer" containerID="c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.450276 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.455529 4869 scope.go:117] "RemoveContainer" containerID="7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.460387 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jkrwj"] Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.499122 4869 scope.go:117] "RemoveContainer" containerID="7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c" Sep 29 15:11:19 crc kubenswrapper[4869]: E0929 15:11:19.499824 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c\": container with ID starting with 7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c not found: ID does not exist" containerID="7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.499892 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c"} err="failed to get container status \"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c\": rpc error: code = NotFound desc = could not find container \"7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c\": container with ID starting with 7f855467cce82c3ffc095360273f1a613332fc9ef9cac8b2fa1a9085f5a10f6c not found: ID does not exist" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.499939 4869 scope.go:117] "RemoveContainer" containerID="c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd" Sep 29 15:11:19 crc kubenswrapper[4869]: E0929 15:11:19.500513 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd\": container with ID starting with c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd not found: ID does not exist" containerID="c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.500595 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd"} err="failed to get container status \"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd\": rpc error: code = NotFound desc = could not find container \"c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd\": container with ID starting with c59dad85c33e12a05f98df589fc62dc23f59d4140ed76acf2959bbfc1e717cfd not found: ID does not exist" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.500651 4869 scope.go:117] "RemoveContainer" containerID="7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52" Sep 29 15:11:19 crc kubenswrapper[4869]: E0929 15:11:19.501322 4869 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52\": container with ID starting with 7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52 not found: ID does not exist" containerID="7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52" Sep 29 15:11:19 crc kubenswrapper[4869]: I0929 15:11:19.501372 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52"} err="failed to get container status \"7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52\": rpc error: code = NotFound desc = could not find container \"7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52\": container with ID starting with 7e2f12f7ac331bcdb76fc3976664dbe8fe9a86571501574a136e4dc3e4d03b52 not found: ID does not exist" Sep 29 15:11:20 crc kubenswrapper[4869]: I0929 15:11:20.255857 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" path="/var/lib/kubelet/pods/386517a6-babc-4451-abcf-6a97ccbaacf2/volumes" Sep 29 15:11:31 crc kubenswrapper[4869]: I0929 15:11:31.242237 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:11:31 crc kubenswrapper[4869]: I0929 15:11:31.516653 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5"} Sep 29 15:12:02 crc kubenswrapper[4869]: E0929 15:12:02.498675 4869 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:38596->38.102.83.80:45233: write tcp 38.102.83.80:38596->38.102.83.80:45233: write: broken pipe Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.559970 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:12:47 crc kubenswrapper[4869]: E0929 15:12:47.564807 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="registry-server" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.564949 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="registry-server" Sep 29 15:12:47 crc kubenswrapper[4869]: E0929 15:12:47.565082 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="extract-utilities" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.565165 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="extract-utilities" Sep 29 15:12:47 crc kubenswrapper[4869]: E0929 15:12:47.565271 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="extract-content" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.565343 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="extract-content" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.565733 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="386517a6-babc-4451-abcf-6a97ccbaacf2" containerName="registry-server" Sep 29 15:12:47 crc 
kubenswrapper[4869]: I0929 15:12:47.567682 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.574851 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.728292 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.728378 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.728809 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2q2s\" (UniqueName: \"kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.831660 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.831835 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2q2s\" (UniqueName: \"kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.831992 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.832756 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.832799 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 
15:12:47.857814 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2q2s\" (UniqueName: \"kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s\") pod \"redhat-operators-hgb8j\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:47 crc kubenswrapper[4869]: I0929 15:12:47.904174 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:12:48 crc kubenswrapper[4869]: I0929 15:12:48.430180 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:12:49 crc kubenswrapper[4869]: I0929 15:12:49.348199 4869 generic.go:334] "Generic (PLEG): container finished" podID="00e09175-2fce-41bb-b61b-14df789625f9" containerID="0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4" exitCode=0 Sep 29 15:12:49 crc kubenswrapper[4869]: I0929 15:12:49.348333 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerDied","Data":"0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4"} Sep 29 15:12:49 crc kubenswrapper[4869]: I0929 15:12:49.348973 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerStarted","Data":"28d437a0cbf43ea21ed5e74b0a789962cf27a66edcbb85b6cf6c36c0e5aacbe4"} Sep 29 15:12:51 crc kubenswrapper[4869]: I0929 15:12:51.369142 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerStarted","Data":"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29"} Sep 29 15:12:54 crc kubenswrapper[4869]: I0929 15:12:54.413759 4869 generic.go:334] "Generic (PLEG): container finished" podID="00e09175-2fce-41bb-b61b-14df789625f9" containerID="f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29" exitCode=0 Sep 29 15:12:54 crc kubenswrapper[4869]: I0929 15:12:54.413853 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerDied","Data":"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29"} Sep 29 15:12:55 crc kubenswrapper[4869]: E0929 15:12:55.772793 4869 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = parsing image configuration: Get \"https://cdn01.quay.io/quayio-production-s3/sha256/b8/b856e4d37af238240aaa3504ebf72881a05d3e5875365377d4fbd3a313fe7d06?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATAAF2YHTCKFFWO5C%2F20250929%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250929T151254Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=68e3417633d0449a5e10ae1b45e8e0f6f085d7f56aa93f6e534092c26023d611®ion=us-east-1&namespace=openshift-release-dev&username=openshift-release-dev+ocm_access_1b89217552bc42d1be3fb06a1aed001a&repo_name=ocp-v4.0-art-dev&akamai_signature=exp=1759159674~hmac=a9ea20db5bcf2f0f7770f6a9e30d812343906489b5597aafec7adb140d8ba605\": remote error: tls: internal error" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad" Sep 29 15:12:55 crc kubenswrapper[4869]: E0929 15:12:55.773540 4869 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad,Command:[/bin/opm],Args:[serve /extracted-catalog/catalog --cache-dir=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOMEMLIMIT,Value:30MiB,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{31457280 0} {} 30Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p2q2s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-hgb8j_openshift-marketplace(00e09175-2fce-41bb-b61b-14df789625f9): ErrImagePull: parsing image configuration: Get \"https://cdn01.quay.io/quayio-production-s3/sha256/b8/b856e4d37af238240aaa3504ebf72881a05d3e5875365377d4fbd3a313fe7d06?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATAAF2YHTCKFFWO5C%2F20250929%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250929T151254Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=68e3417633d0449a5e10ae1b45e8e0f6f085d7f56aa93f6e534092c26023d611®ion=us-east-1&namespace=openshift-release-dev&username=openshift-release-dev+ocm_access_1b89217552bc42d1be3fb06a1aed001a&repo_name=ocp-v4.0-art-dev&akamai_signature=exp=1759159674~hmac=a9ea20db5bcf2f0f7770f6a9e30d812343906489b5597aafec7adb140d8ba605\": remote error: tls: internal error" logger="UnhandledError" Sep 29 15:12:55 crc kubenswrapper[4869]: E0929 15:12:55.774863 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"parsing image configuration: Get 
\\\"https://cdn01.quay.io/quayio-production-s3/sha256/b8/b856e4d37af238240aaa3504ebf72881a05d3e5875365377d4fbd3a313fe7d06?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATAAF2YHTCKFFWO5C%2F20250929%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250929T151254Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=68e3417633d0449a5e10ae1b45e8e0f6f085d7f56aa93f6e534092c26023d611®ion=us-east-1&namespace=openshift-release-dev&username=openshift-release-dev+ocm_access_1b89217552bc42d1be3fb06a1aed001a&repo_name=ocp-v4.0-art-dev&akamai_signature=exp=1759159674~hmac=a9ea20db5bcf2f0f7770f6a9e30d812343906489b5597aafec7adb140d8ba605\\\": remote error: tls: internal error\"" pod="openshift-marketplace/redhat-operators-hgb8j" podUID="00e09175-2fce-41bb-b61b-14df789625f9" Sep 29 15:13:11 crc kubenswrapper[4869]: I0929 15:13:11.598786 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerStarted","Data":"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a"} Sep 29 15:13:11 crc kubenswrapper[4869]: I0929 15:13:11.637468 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hgb8j" podStartSLOduration=3.301956167 podStartE2EDuration="24.637442534s" podCreationTimestamp="2025-09-29 15:12:47 +0000 UTC" firstStartedPulling="2025-09-29 15:12:49.351105776 +0000 UTC m=+5495.791750096" lastFinishedPulling="2025-09-29 15:13:10.686592103 +0000 UTC m=+5517.127236463" observedRunningTime="2025-09-29 15:13:11.628106188 +0000 UTC m=+5518.068750528" watchObservedRunningTime="2025-09-29 15:13:11.637442534 +0000 UTC m=+5518.078086854" Sep 29 15:13:17 crc kubenswrapper[4869]: I0929 15:13:17.904676 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:17 crc kubenswrapper[4869]: I0929 15:13:17.905373 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:18 crc kubenswrapper[4869]: I0929 15:13:18.963233 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgb8j" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="registry-server" probeResult="failure" output=< Sep 29 15:13:18 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 15:13:18 crc kubenswrapper[4869]: > Sep 29 15:13:27 crc kubenswrapper[4869]: I0929 15:13:27.957720 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:28 crc kubenswrapper[4869]: I0929 15:13:28.010213 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:28 crc kubenswrapper[4869]: I0929 15:13:28.199633 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:13:29 crc kubenswrapper[4869]: I0929 15:13:29.783446 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hgb8j" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="registry-server" containerID="cri-o://87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a" gracePeriod=2 Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.289089 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.374857 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities\") pod \"00e09175-2fce-41bb-b61b-14df789625f9\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.375188 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content\") pod \"00e09175-2fce-41bb-b61b-14df789625f9\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.375309 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2q2s\" (UniqueName: \"kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s\") pod \"00e09175-2fce-41bb-b61b-14df789625f9\" (UID: \"00e09175-2fce-41bb-b61b-14df789625f9\") " Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.377553 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities" (OuterVolumeSpecName: "utilities") pod "00e09175-2fce-41bb-b61b-14df789625f9" (UID: "00e09175-2fce-41bb-b61b-14df789625f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.388834 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s" (OuterVolumeSpecName: "kube-api-access-p2q2s") pod "00e09175-2fce-41bb-b61b-14df789625f9" (UID: "00e09175-2fce-41bb-b61b-14df789625f9"). InnerVolumeSpecName "kube-api-access-p2q2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.468425 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00e09175-2fce-41bb-b61b-14df789625f9" (UID: "00e09175-2fce-41bb-b61b-14df789625f9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.477913 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.477959 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00e09175-2fce-41bb-b61b-14df789625f9-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.477970 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2q2s\" (UniqueName: \"kubernetes.io/projected/00e09175-2fce-41bb-b61b-14df789625f9-kube-api-access-p2q2s\") on node \"crc\" DevicePath \"\"" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.798332 4869 generic.go:334] "Generic (PLEG): container finished" podID="00e09175-2fce-41bb-b61b-14df789625f9" containerID="87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a" exitCode=0 Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.798386 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerDied","Data":"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a"} Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.798422 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgb8j" event={"ID":"00e09175-2fce-41bb-b61b-14df789625f9","Type":"ContainerDied","Data":"28d437a0cbf43ea21ed5e74b0a789962cf27a66edcbb85b6cf6c36c0e5aacbe4"} Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.798445 4869 scope.go:117] "RemoveContainer" containerID="87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.798681 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hgb8j" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.828503 4869 scope.go:117] "RemoveContainer" containerID="f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.845765 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.867134 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hgb8j"] Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.896145 4869 scope.go:117] "RemoveContainer" containerID="0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.925927 4869 scope.go:117] "RemoveContainer" containerID="87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a" Sep 29 15:13:30 crc kubenswrapper[4869]: E0929 15:13:30.926280 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a\": container with ID starting with 87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a not found: ID does not exist" containerID="87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.926320 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a"} err="failed to get container status \"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a\": rpc error: code = NotFound desc = could not find container \"87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a\": container with ID starting with 87eae761807dc185cbfe07840c1f0728ccf7b24712095e73fcd5de5a22bf9e2a not found: ID does not exist" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.926346 4869 scope.go:117] "RemoveContainer" containerID="f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29" Sep 29 15:13:30 crc kubenswrapper[4869]: E0929 15:13:30.926935 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29\": container with ID starting with f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29 not found: ID does not exist" containerID="f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.926964 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29"} err="failed to get container status \"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29\": rpc error: code = NotFound desc = could not find container \"f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29\": container with ID starting with f47aee0c9a683f3d4def62279c5d57ce03c2d8dc0ba551a7e9f9b2e320e05d29 not found: ID does not exist" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.926982 4869 scope.go:117] "RemoveContainer" containerID="0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4" Sep 29 15:13:30 crc kubenswrapper[4869]: E0929 15:13:30.927371 4869 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4\": container with ID starting with 0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4 not found: ID does not exist" containerID="0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4" Sep 29 15:13:30 crc kubenswrapper[4869]: I0929 15:13:30.927397 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4"} err="failed to get container status \"0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4\": rpc error: code = NotFound desc = could not find container \"0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4\": container with ID starting with 0a2fb9132891d3fe35eb9220a86c269c4c8a9c0b4f94eb8f4a00a6dfe03d95c4 not found: ID does not exist" Sep 29 15:13:32 crc kubenswrapper[4869]: I0929 15:13:32.253728 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00e09175-2fce-41bb-b61b-14df789625f9" path="/var/lib/kubelet/pods/00e09175-2fce-41bb-b61b-14df789625f9/volumes" Sep 29 15:13:50 crc kubenswrapper[4869]: I0929 15:13:50.656668 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:13:50 crc kubenswrapper[4869]: I0929 15:13:50.657345 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:14:20 crc kubenswrapper[4869]: I0929 15:14:20.656888 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:14:20 crc kubenswrapper[4869]: I0929 15:14:20.657525 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:14:50 crc kubenswrapper[4869]: I0929 15:14:50.657325 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:14:50 crc kubenswrapper[4869]: I0929 15:14:50.658120 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:14:50 crc kubenswrapper[4869]: I0929 15:14:50.658184 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 15:14:50 crc kubenswrapper[4869]: I0929 15:14:50.659120 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 15:14:50 crc kubenswrapper[4869]: I0929 15:14:50.659170 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5" gracePeriod=600 Sep 29 15:14:51 crc kubenswrapper[4869]: I0929 15:14:51.638715 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5" exitCode=0 Sep 29 15:14:51 crc kubenswrapper[4869]: I0929 15:14:51.638802 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5"} Sep 29 15:14:51 crc kubenswrapper[4869]: I0929 15:14:51.639320 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"} Sep 29 15:14:51 crc kubenswrapper[4869]: I0929 15:14:51.639353 4869 scope.go:117] "RemoveContainer" containerID="af8499565eb487ed2998232ad5d2c655213b5596a951d751d6d8adf5f4736ae6" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.156217 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj"] Sep 29 15:15:00 crc kubenswrapper[4869]: E0929 15:15:00.157346 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="extract-utilities" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.157366 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="extract-utilities" Sep 29 15:15:00 crc kubenswrapper[4869]: E0929 15:15:00.157391 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="registry-server" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.157402 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="registry-server" Sep 29 15:15:00 crc kubenswrapper[4869]: E0929 15:15:00.157417 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="extract-content" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.157425 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="extract-content" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.157747 4869 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="00e09175-2fce-41bb-b61b-14df789625f9" containerName="registry-server" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.158725 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.161164 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.161247 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.167349 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj"] Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.212216 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.212791 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqlnx\" (UniqueName: \"kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.213041 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.315334 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.317412 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.317556 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqlnx\" (UniqueName: \"kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.316339 
4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.325513 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.338311 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqlnx\" (UniqueName: \"kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx\") pod \"collect-profiles-29319315-lxkmj\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.482160 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:00 crc kubenswrapper[4869]: I0929 15:15:00.984650 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj"] Sep 29 15:15:01 crc kubenswrapper[4869]: I0929 15:15:01.747843 4869 generic.go:334] "Generic (PLEG): container finished" podID="e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" containerID="ed22994c2f50b25a3c7aeba5fa9fb27b4f7fb9d2c2b43710d645fea45317e656" exitCode=0 Sep 29 15:15:01 crc kubenswrapper[4869]: I0929 15:15:01.748006 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" event={"ID":"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98","Type":"ContainerDied","Data":"ed22994c2f50b25a3c7aeba5fa9fb27b4f7fb9d2c2b43710d645fea45317e656"} Sep 29 15:15:01 crc kubenswrapper[4869]: I0929 15:15:01.748183 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" event={"ID":"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98","Type":"ContainerStarted","Data":"65a6bd81792e29d47ee9e3f4c5290ad836254e75f98c2344d3c14089d717c0f7"} Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.120300 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.185702 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume\") pod \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.185818 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume\") pod \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.185912 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqlnx\" (UniqueName: \"kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx\") pod \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\" (UID: \"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98\") " Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.189906 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume" (OuterVolumeSpecName: "config-volume") pod "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" (UID: "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.191577 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" (UID: "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.288865 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.288906 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.291546 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx" (OuterVolumeSpecName: "kube-api-access-lqlnx") pod "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" (UID: "e0bd57dd-dbe0-4b0c-9a91-187d18d85d98"). InnerVolumeSpecName "kube-api-access-lqlnx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.391842 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqlnx\" (UniqueName: \"kubernetes.io/projected/e0bd57dd-dbe0-4b0c-9a91-187d18d85d98-kube-api-access-lqlnx\") on node \"crc\" DevicePath \"\"" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.768866 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" event={"ID":"e0bd57dd-dbe0-4b0c-9a91-187d18d85d98","Type":"ContainerDied","Data":"65a6bd81792e29d47ee9e3f4c5290ad836254e75f98c2344d3c14089d717c0f7"} Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.768925 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65a6bd81792e29d47ee9e3f4c5290ad836254e75f98c2344d3c14089d717c0f7" Sep 29 15:15:03 crc kubenswrapper[4869]: I0929 15:15:03.768966 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319315-lxkmj" Sep 29 15:15:04 crc kubenswrapper[4869]: I0929 15:15:04.197104 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf"] Sep 29 15:15:04 crc kubenswrapper[4869]: I0929 15:15:04.208215 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319270-mh9wf"] Sep 29 15:15:04 crc kubenswrapper[4869]: I0929 15:15:04.254991 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba0e62b3-4e01-4cdc-a093-4f1ea622500b" path="/var/lib/kubelet/pods/ba0e62b3-4e01-4cdc-a093-4f1ea622500b/volumes" Sep 29 15:15:25 crc kubenswrapper[4869]: I0929 15:15:25.402297 4869 scope.go:117] "RemoveContainer" containerID="956911aebfa0ec896e440466050edfa6f38ebc7ba9209e7d16651403abee6741" Sep 29 15:17:20 crc kubenswrapper[4869]: I0929 15:17:20.657742 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:17:20 crc kubenswrapper[4869]: I0929 15:17:20.660333 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:17:50 crc kubenswrapper[4869]: I0929 15:17:50.656943 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:17:50 crc kubenswrapper[4869]: I0929 15:17:50.657649 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.659705 4869 patch_prober.go:28] interesting 
pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.660403 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.660481 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.661791 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.661895 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" gracePeriod=600 Sep 29 15:18:20 crc kubenswrapper[4869]: E0929 15:18:20.804784 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.927676 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" exitCode=0 Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.927743 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"} Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.927805 4869 scope.go:117] "RemoveContainer" containerID="b23798510cac866fca7f8d4056d30d6040819ccda078a8ba7cbeeb54e28ea1f5" Sep 29 15:18:20 crc kubenswrapper[4869]: I0929 15:18:20.928823 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:18:20 crc kubenswrapper[4869]: E0929 15:18:20.929228 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:18:32 crc kubenswrapper[4869]: I0929 15:18:32.242334 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:18:32 crc kubenswrapper[4869]: E0929 15:18:32.243110 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:18:44 crc kubenswrapper[4869]: I0929 15:18:44.249945 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:18:44 crc kubenswrapper[4869]: E0929 15:18:44.250832 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:18:59 crc kubenswrapper[4869]: I0929 15:18:59.242170 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:18:59 crc kubenswrapper[4869]: E0929 15:18:59.242996 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:19:10 crc kubenswrapper[4869]: I0929 15:19:10.243029 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:19:10 crc kubenswrapper[4869]: E0929 15:19:10.244017 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:19:21 crc kubenswrapper[4869]: I0929 15:19:21.241846 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:19:21 crc kubenswrapper[4869]: E0929 15:19:21.242652 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:19:32 crc kubenswrapper[4869]: I0929 15:19:32.242353 4869 
scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:19:32 crc kubenswrapper[4869]: E0929 15:19:32.243317 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:19:46 crc kubenswrapper[4869]: I0929 15:19:46.241637 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:19:46 crc kubenswrapper[4869]: E0929 15:19:46.242391 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.232868 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r6br4"] Sep 29 15:20:00 crc kubenswrapper[4869]: E0929 15:20:00.234027 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" containerName="collect-profiles" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.234044 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" containerName="collect-profiles" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.234301 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0bd57dd-dbe0-4b0c-9a91-187d18d85d98" containerName="collect-profiles" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.236211 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.245748 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:20:00 crc kubenswrapper[4869]: E0929 15:20:00.246195 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.276422 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r6br4"] Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.286969 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkc9q\" (UniqueName: \"kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.287094 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.287421 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.389769 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.389985 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.390130 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkc9q\" (UniqueName: \"kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.390402 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.390515 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.409682 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkc9q\" (UniqueName: \"kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q\") pod \"certified-operators-r6br4\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:00 crc kubenswrapper[4869]: I0929 15:20:00.567205 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:01 crc kubenswrapper[4869]: I0929 15:20:01.118875 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r6br4"] Sep 29 15:20:01 crc kubenswrapper[4869]: W0929 15:20:01.129342 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad299bc3_758d_40bb_92d8_62599397a2a2.slice/crio-306be134bc68181064d946296c5f045d5e867f8608aa0ef87a975cc38f6e1c3d WatchSource:0}: Error finding container 306be134bc68181064d946296c5f045d5e867f8608aa0ef87a975cc38f6e1c3d: Status 404 returned error can't find the container with id 306be134bc68181064d946296c5f045d5e867f8608aa0ef87a975cc38f6e1c3d Sep 29 15:20:02 crc kubenswrapper[4869]: I0929 15:20:02.001424 4869 generic.go:334] "Generic (PLEG): container finished" podID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerID="bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793" exitCode=0 Sep 29 15:20:02 crc kubenswrapper[4869]: I0929 15:20:02.001538 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerDied","Data":"bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793"} Sep 29 15:20:02 crc kubenswrapper[4869]: I0929 15:20:02.001734 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerStarted","Data":"306be134bc68181064d946296c5f045d5e867f8608aa0ef87a975cc38f6e1c3d"} Sep 29 15:20:02 crc kubenswrapper[4869]: I0929 15:20:02.005192 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Sep 29 15:20:03 crc kubenswrapper[4869]: I0929 15:20:03.012499 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerStarted","Data":"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"} Sep 29 15:20:04 crc kubenswrapper[4869]: I0929 15:20:04.024759 4869 generic.go:334] "Generic (PLEG): container finished" podID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerID="be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed" exitCode=0 
Sep 29 15:20:04 crc kubenswrapper[4869]: I0929 15:20:04.024877 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerDied","Data":"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"}
Sep 29 15:20:05 crc kubenswrapper[4869]: I0929 15:20:05.036284 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerStarted","Data":"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"}
Sep 29 15:20:05 crc kubenswrapper[4869]: I0929 15:20:05.061570 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r6br4" podStartSLOduration=2.55230326 podStartE2EDuration="5.06153898s" podCreationTimestamp="2025-09-29 15:20:00 +0000 UTC" firstStartedPulling="2025-09-29 15:20:02.004960177 +0000 UTC m=+5928.445604497" lastFinishedPulling="2025-09-29 15:20:04.514195897 +0000 UTC m=+5930.954840217" observedRunningTime="2025-09-29 15:20:05.05460509 +0000 UTC m=+5931.495249410" watchObservedRunningTime="2025-09-29 15:20:05.06153898 +0000 UTC m=+5931.502183300"
Sep 29 15:20:10 crc kubenswrapper[4869]: I0929 15:20:10.568106 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r6br4"
Sep 29 15:20:10 crc kubenswrapper[4869]: I0929 15:20:10.568721 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r6br4"
Sep 29 15:20:10 crc kubenswrapper[4869]: I0929 15:20:10.618873 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r6br4"
Sep 29 15:20:11 crc kubenswrapper[4869]: I0929 15:20:11.165820 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r6br4"
Sep 29 15:20:11 crc kubenswrapper[4869]: I0929 15:20:11.217253 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r6br4"]
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.132873 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r6br4" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="registry-server" containerID="cri-o://343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7" gracePeriod=2
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.282602 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"]
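
The "Observed pod startup duration" record above carries its own arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A small Go check using the timestamps quoted in the entry reproduces both numbers exactly (5.06153898s and 2.55230326s); the same relationship holds for the later redhat-marketplace-ssr5x record:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matches the "+0000 UTC" timestamps quoted in the log entry.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-09-29 15:20:00 +0000 UTC")
	firstPull := parse("2025-09-29 15:20:02.004960177 +0000 UTC")
	lastPull := parse("2025-09-29 15:20:04.514195897 +0000 UTC")
	running := parse("2025-09-29 15:20:05.06153898 +0000 UTC")

	e2e := running.Sub(created)     // podStartE2EDuration: 5.06153898s
	pull := lastPull.Sub(firstPull) // image-pull window: 2.50923572s
	fmt.Println("podStartE2EDuration:", e2e)
	fmt.Println("podStartSLOduration:", e2e-pull) // 2.55230326s
}
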
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.285541 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.291455 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.291680 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.291758 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76p9n\" (UniqueName: \"kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.346535 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"]
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.394440 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.394619 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.394987 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76p9n\" (UniqueName: \"kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.396191 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.396228 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x"
Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.420017 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-76p9n\" (UniqueName: \"kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n\") pod \"redhat-marketplace-ssr5x\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.641380 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.775907 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r6br4" Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.904866 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities\") pod \"ad299bc3-758d-40bb-92d8-62599397a2a2\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.905162 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content\") pod \"ad299bc3-758d-40bb-92d8-62599397a2a2\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.905278 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkc9q\" (UniqueName: \"kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q\") pod \"ad299bc3-758d-40bb-92d8-62599397a2a2\" (UID: \"ad299bc3-758d-40bb-92d8-62599397a2a2\") " Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.906489 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities" (OuterVolumeSpecName: "utilities") pod "ad299bc3-758d-40bb-92d8-62599397a2a2" (UID: "ad299bc3-758d-40bb-92d8-62599397a2a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.912389 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q" (OuterVolumeSpecName: "kube-api-access-jkc9q") pod "ad299bc3-758d-40bb-92d8-62599397a2a2" (UID: "ad299bc3-758d-40bb-92d8-62599397a2a2"). InnerVolumeSpecName "kube-api-access-jkc9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:20:13 crc kubenswrapper[4869]: I0929 15:20:13.969102 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad299bc3-758d-40bb-92d8-62599397a2a2" (UID: "ad299bc3-758d-40bb-92d8-62599397a2a2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.010340 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.010383 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad299bc3-758d-40bb-92d8-62599397a2a2-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.010403 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkc9q\" (UniqueName: \"kubernetes.io/projected/ad299bc3-758d-40bb-92d8-62599397a2a2-kube-api-access-jkc9q\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.114087 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"] Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.142844 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerStarted","Data":"e8ad0e87fab5946845ccf4f4253af973658525fee7056c6b3293b65de0cd7428"} Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.145125 4869 generic.go:334] "Generic (PLEG): container finished" podID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerID="343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7" exitCode=0 Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.145154 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerDied","Data":"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"} Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.145180 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6br4" event={"ID":"ad299bc3-758d-40bb-92d8-62599397a2a2","Type":"ContainerDied","Data":"306be134bc68181064d946296c5f045d5e867f8608aa0ef87a975cc38f6e1c3d"} Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.145199 4869 scope.go:117] "RemoveContainer" containerID="343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.145206 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r6br4"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.180064 4869 scope.go:117] "RemoveContainer" containerID="be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.181115 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r6br4"]
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.191669 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r6br4"]
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.227533 4869 scope.go:117] "RemoveContainer" containerID="bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.251819 4869 scope.go:117] "RemoveContainer" containerID="343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"
Sep 29 15:20:14 crc kubenswrapper[4869]: E0929 15:20:14.252275 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7\": container with ID starting with 343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7 not found: ID does not exist" containerID="343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.252330 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"} err="failed to get container status \"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7\": rpc error: code = NotFound desc = could not find container \"343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7\": container with ID starting with 343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7 not found: ID does not exist"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.252363 4869 scope.go:117] "RemoveContainer" containerID="be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"
Sep 29 15:20:14 crc kubenswrapper[4869]: E0929 15:20:14.252774 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed\": container with ID starting with be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed not found: ID does not exist" containerID="be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.252808 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed"} err="failed to get container status \"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed\": rpc error: code = NotFound desc = could not find container \"be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed\": container with ID starting with be0e25a1ebd8cb1ee8c6ed14e9ac760c7d7af535c53da18ebb240fdfe6a174ed not found: ID does not exist"
Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.252832 4869 scope.go:117] "RemoveContainer" containerID="bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793"
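
The error/"DeleteContainer returned error" pairs above are cleanup of containers whose records are already gone from CRI-O: the status lookup comes back as gRPC NotFound, the kubelet logs it, and deletion proceeds as if it had succeeded. A minimal Go sketch of that tolerance pattern, assuming a hypothetical stand-in remove function rather than the kubelet's actual code path:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a hypothetical stand-in for a CRI RemoveContainer
// call; here it always reports the container as missing, mimicking the
// "could not find container" errors in the log above.
func removeContainer(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}

func main() {
	id := "343ce2d81634f62b3f16892bcc334fd240218724f92951c6e50ca4c58d42aab7"
	if err := removeContainer(id); err != nil {
		if status.Code(err) == codes.NotFound {
			// Already gone: log and treat as success, the same
			// tolerance the entries above reflect.
			fmt.Println("container already removed; ignoring NotFound")
			return
		}
		panic(err)
	}
}
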
failed" err="rpc error: code = NotFound desc = could not find container \"bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793\": container with ID starting with bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793 not found: ID does not exist" containerID="bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.253119 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793"} err="failed to get container status \"bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793\": rpc error: code = NotFound desc = could not find container \"bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793\": container with ID starting with bd434075008487d171dd0ec7077739a8b2769fc019152050cc54a00f05698793 not found: ID does not exist" Sep 29 15:20:14 crc kubenswrapper[4869]: I0929 15:20:14.256027 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" path="/var/lib/kubelet/pods/ad299bc3-758d-40bb-92d8-62599397a2a2/volumes" Sep 29 15:20:15 crc kubenswrapper[4869]: I0929 15:20:15.167362 4869 generic.go:334] "Generic (PLEG): container finished" podID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerID="8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923" exitCode=0 Sep 29 15:20:15 crc kubenswrapper[4869]: I0929 15:20:15.167535 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerDied","Data":"8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923"} Sep 29 15:20:15 crc kubenswrapper[4869]: I0929 15:20:15.241937 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:20:15 crc kubenswrapper[4869]: E0929 15:20:15.242290 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:20:17 crc kubenswrapper[4869]: I0929 15:20:17.193837 4869 generic.go:334] "Generic (PLEG): container finished" podID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerID="70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0" exitCode=0 Sep 29 15:20:17 crc kubenswrapper[4869]: I0929 15:20:17.194389 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerDied","Data":"70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0"} Sep 29 15:20:18 crc kubenswrapper[4869]: I0929 15:20:18.209773 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerStarted","Data":"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd"} Sep 29 15:20:18 crc kubenswrapper[4869]: I0929 15:20:18.235123 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ssr5x" 
podStartSLOduration=2.675025443 podStartE2EDuration="5.235087007s" podCreationTimestamp="2025-09-29 15:20:13 +0000 UTC" firstStartedPulling="2025-09-29 15:20:15.169950941 +0000 UTC m=+5941.610595261" lastFinishedPulling="2025-09-29 15:20:17.730012505 +0000 UTC m=+5944.170656825" observedRunningTime="2025-09-29 15:20:18.229586974 +0000 UTC m=+5944.670231294" watchObservedRunningTime="2025-09-29 15:20:18.235087007 +0000 UTC m=+5944.675731327" Sep 29 15:20:23 crc kubenswrapper[4869]: I0929 15:20:23.642147 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:23 crc kubenswrapper[4869]: I0929 15:20:23.643571 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:23 crc kubenswrapper[4869]: I0929 15:20:23.693266 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:24 crc kubenswrapper[4869]: I0929 15:20:24.332311 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:24 crc kubenswrapper[4869]: I0929 15:20:24.382527 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"] Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.294151 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ssr5x" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="registry-server" containerID="cri-o://04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd" gracePeriod=2 Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.819934 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.941057 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76p9n\" (UniqueName: \"kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n\") pod \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.941211 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities\") pod \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.941375 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content\") pod \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\" (UID: \"ff9dff72-d876-4e68-a472-60be8f6b1ad5\") " Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.942453 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities" (OuterVolumeSpecName: "utilities") pod "ff9dff72-d876-4e68-a472-60be8f6b1ad5" (UID: "ff9dff72-d876-4e68-a472-60be8f6b1ad5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.950094 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n" (OuterVolumeSpecName: "kube-api-access-76p9n") pod "ff9dff72-d876-4e68-a472-60be8f6b1ad5" (UID: "ff9dff72-d876-4e68-a472-60be8f6b1ad5"). InnerVolumeSpecName "kube-api-access-76p9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:20:26 crc kubenswrapper[4869]: I0929 15:20:26.960014 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff9dff72-d876-4e68-a472-60be8f6b1ad5" (UID: "ff9dff72-d876-4e68-a472-60be8f6b1ad5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.044940 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76p9n\" (UniqueName: \"kubernetes.io/projected/ff9dff72-d876-4e68-a472-60be8f6b1ad5-kube-api-access-76p9n\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.044993 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.045006 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff9dff72-d876-4e68-a472-60be8f6b1ad5-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.242977 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:20:27 crc kubenswrapper[4869]: E0929 15:20:27.243229 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.309493 4869 generic.go:334] "Generic (PLEG): container finished" podID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerID="04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd" exitCode=0 Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.309543 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerDied","Data":"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd"} Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.309574 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssr5x" event={"ID":"ff9dff72-d876-4e68-a472-60be8f6b1ad5","Type":"ContainerDied","Data":"e8ad0e87fab5946845ccf4f4253af973658525fee7056c6b3293b65de0cd7428"} Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.309572 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssr5x" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.309594 4869 scope.go:117] "RemoveContainer" containerID="04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.337668 4869 scope.go:117] "RemoveContainer" containerID="70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.353762 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"] Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.362847 4869 scope.go:117] "RemoveContainer" containerID="8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.378789 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssr5x"] Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.421030 4869 scope.go:117] "RemoveContainer" containerID="04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd" Sep 29 15:20:27 crc kubenswrapper[4869]: E0929 15:20:27.422275 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd\": container with ID starting with 04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd not found: ID does not exist" containerID="04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.422328 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd"} err="failed to get container status \"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd\": rpc error: code = NotFound desc = could not find container \"04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd\": container with ID starting with 04b6d4534a1960f86bfc759f80563eb584f099e8c3ebf155741b1f044e89fbdd not found: ID does not exist" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.422367 4869 scope.go:117] "RemoveContainer" containerID="70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0" Sep 29 15:20:27 crc kubenswrapper[4869]: E0929 15:20:27.422988 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0\": container with ID starting with 70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0 not found: ID does not exist" containerID="70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.423028 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0"} err="failed to get container status \"70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0\": rpc error: code = NotFound desc = could not find container \"70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0\": container with ID starting with 70741f155e5fb94fd445611f3d8ee700b233427686c05621bef3a819bd50c1f0 not found: ID does not exist" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.423054 4869 scope.go:117] "RemoveContainer" 
containerID="8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923" Sep 29 15:20:27 crc kubenswrapper[4869]: E0929 15:20:27.423384 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923\": container with ID starting with 8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923 not found: ID does not exist" containerID="8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923" Sep 29 15:20:27 crc kubenswrapper[4869]: I0929 15:20:27.423472 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923"} err="failed to get container status \"8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923\": rpc error: code = NotFound desc = could not find container \"8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923\": container with ID starting with 8d50f28dcb0a36959218683606091a21da5b0bc5429f40db4e5ced704e71a923 not found: ID does not exist" Sep 29 15:20:28 crc kubenswrapper[4869]: I0929 15:20:28.255129 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" path="/var/lib/kubelet/pods/ff9dff72-d876-4e68-a472-60be8f6b1ad5/volumes" Sep 29 15:20:41 crc kubenswrapper[4869]: I0929 15:20:41.242533 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:20:41 crc kubenswrapper[4869]: E0929 15:20:41.243445 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:20:55 crc kubenswrapper[4869]: I0929 15:20:55.242196 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:20:55 crc kubenswrapper[4869]: E0929 15:20:55.243106 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:21:09 crc kubenswrapper[4869]: I0929 15:21:09.242750 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:21:09 crc kubenswrapper[4869]: E0929 15:21:09.243686 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:21:24 crc kubenswrapper[4869]: I0929 15:21:24.251472 4869 scope.go:117] "RemoveContainer" 
containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:21:24 crc kubenswrapper[4869]: E0929 15:21:24.252448 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:21:37 crc kubenswrapper[4869]: I0929 15:21:37.242424 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:21:37 crc kubenswrapper[4869]: E0929 15:21:37.243236 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:21:50 crc kubenswrapper[4869]: I0929 15:21:50.242420 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:21:50 crc kubenswrapper[4869]: E0929 15:21:50.243300 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:22:02 crc kubenswrapper[4869]: I0929 15:22:02.242173 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:22:02 crc kubenswrapper[4869]: E0929 15:22:02.243193 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:22:13 crc kubenswrapper[4869]: I0929 15:22:13.242366 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2" Sep 29 15:22:13 crc kubenswrapper[4869]: E0929 15:22:13.243091 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.393780 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6wwzh"] Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.396150 4869 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="extract-content" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.396327 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="extract-content" Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.396454 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.396563 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.396742 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="extract-utilities" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.397454 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="extract-utilities" Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.397649 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.397792 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.397932 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="extract-content" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.398054 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="extract-content" Sep 29 15:22:16 crc kubenswrapper[4869]: E0929 15:22:16.398186 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="extract-utilities" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.398305 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="extract-utilities" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.398881 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad299bc3-758d-40bb-92d8-62599397a2a2" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.399174 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff9dff72-d876-4e68-a472-60be8f6b1ad5" containerName="registry-server" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.401935 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.405958 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wwzh"] Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.487331 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.488017 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlx79\" (UniqueName: \"kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.488274 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.590644 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlx79\" (UniqueName: \"kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.590977 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.591022 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.591778 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.591797 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.616069 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hlx79\" (UniqueName: \"kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79\") pod \"community-operators-6wwzh\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") " pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:16 crc kubenswrapper[4869]: I0929 15:22:16.738402 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wwzh" Sep 29 15:22:17 crc kubenswrapper[4869]: I0929 15:22:17.370186 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wwzh"] Sep 29 15:22:17 crc kubenswrapper[4869]: I0929 15:22:17.436838 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerStarted","Data":"7b8f74f1df5a878d81794244048ae8079b6c1255d35bce37d495e745b3f0ded3"} Sep 29 15:22:18 crc kubenswrapper[4869]: I0929 15:22:18.462194 4869 generic.go:334] "Generic (PLEG): container finished" podID="400cd919-6233-4f07-9651-081594e8c8d8" containerID="1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2" exitCode=0 Sep 29 15:22:18 crc kubenswrapper[4869]: I0929 15:22:18.462457 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerDied","Data":"1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2"} Sep 29 15:22:19 crc kubenswrapper[4869]: I0929 15:22:19.475063 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerStarted","Data":"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"} Sep 29 15:22:20 crc kubenswrapper[4869]: I0929 15:22:20.485972 4869 generic.go:334] "Generic (PLEG): container finished" podID="400cd919-6233-4f07-9651-081594e8c8d8" containerID="f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea" exitCode=0 Sep 29 15:22:20 crc kubenswrapper[4869]: I0929 15:22:20.486063 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerDied","Data":"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"} Sep 29 15:22:20 crc kubenswrapper[4869]: E0929 15:22:20.638039 4869 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod400cd919_6233_4f07_9651_081594e8c8d8.slice/crio-f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea.scope\": RecentStats: unable to find data in memory cache]" Sep 29 15:22:21 crc kubenswrapper[4869]: I0929 15:22:21.498153 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerStarted","Data":"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"} Sep 29 15:22:21 crc kubenswrapper[4869]: I0929 15:22:21.526673 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6wwzh" podStartSLOduration=3.115487294 podStartE2EDuration="5.526653087s" podCreationTimestamp="2025-09-29 15:22:16 +0000 UTC" firstStartedPulling="2025-09-29 
15:22:18.464829737 +0000 UTC m=+6064.905474097" lastFinishedPulling="2025-09-29 15:22:20.87599557 +0000 UTC m=+6067.316639890" observedRunningTime="2025-09-29 15:22:21.517006525 +0000 UTC m=+6067.957650865" watchObservedRunningTime="2025-09-29 15:22:21.526653087 +0000 UTC m=+6067.967297397"
Sep 29 15:22:25 crc kubenswrapper[4869]: I0929 15:22:25.242057 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:22:25 crc kubenswrapper[4869]: E0929 15:22:25.242899 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:22:26 crc kubenswrapper[4869]: I0929 15:22:26.738786 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:26 crc kubenswrapper[4869]: I0929 15:22:26.739148 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:26 crc kubenswrapper[4869]: I0929 15:22:26.790264 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:27 crc kubenswrapper[4869]: I0929 15:22:27.596773 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:27 crc kubenswrapper[4869]: I0929 15:22:27.647508 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wwzh"]
Sep 29 15:22:29 crc kubenswrapper[4869]: I0929 15:22:29.572910 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6wwzh" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="registry-server" containerID="cri-o://b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff" gracePeriod=2
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.033540 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.147028 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlx79\" (UniqueName: \"kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79\") pod \"400cd919-6233-4f07-9651-081594e8c8d8\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") "
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.147186 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content\") pod \"400cd919-6233-4f07-9651-081594e8c8d8\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") "
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.147464 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities\") pod \"400cd919-6233-4f07-9651-081594e8c8d8\" (UID: \"400cd919-6233-4f07-9651-081594e8c8d8\") "
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.148693 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities" (OuterVolumeSpecName: "utilities") pod "400cd919-6233-4f07-9651-081594e8c8d8" (UID: "400cd919-6233-4f07-9651-081594e8c8d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.152819 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79" (OuterVolumeSpecName: "kube-api-access-hlx79") pod "400cd919-6233-4f07-9651-081594e8c8d8" (UID: "400cd919-6233-4f07-9651-081594e8c8d8"). InnerVolumeSpecName "kube-api-access-hlx79". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.209424 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "400cd919-6233-4f07-9651-081594e8c8d8" (UID: "400cd919-6233-4f07-9651-081594e8c8d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.249896 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.250212 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlx79\" (UniqueName: \"kubernetes.io/projected/400cd919-6233-4f07-9651-081594e8c8d8-kube-api-access-hlx79\") on node \"crc\" DevicePath \"\""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.250228 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/400cd919-6233-4f07-9651-081594e8c8d8-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.585285 4869 generic.go:334] "Generic (PLEG): container finished" podID="400cd919-6233-4f07-9651-081594e8c8d8" containerID="b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff" exitCode=0
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.585332 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerDied","Data":"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"}
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.585368 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wwzh" event={"ID":"400cd919-6233-4f07-9651-081594e8c8d8","Type":"ContainerDied","Data":"7b8f74f1df5a878d81794244048ae8079b6c1255d35bce37d495e745b3f0ded3"}
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.585416 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wwzh"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.585423 4869 scope.go:117] "RemoveContainer" containerID="b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.612643 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wwzh"]
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.613009 4869 scope.go:117] "RemoveContainer" containerID="f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.623445 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6wwzh"]
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.643344 4869 scope.go:117] "RemoveContainer" containerID="1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.697738 4869 scope.go:117] "RemoveContainer" containerID="b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"
Sep 29 15:22:30 crc kubenswrapper[4869]: E0929 15:22:30.698350 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff\": container with ID starting with b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff not found: ID does not exist" containerID="b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.698389 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff"} err="failed to get container status \"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff\": rpc error: code = NotFound desc = could not find container \"b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff\": container with ID starting with b8f34bc6141ca3ef3a54d2be126c1d0960d36e0b00484551f30db92b5ee8fbff not found: ID does not exist"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.698419 4869 scope.go:117] "RemoveContainer" containerID="f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"
Sep 29 15:22:30 crc kubenswrapper[4869]: E0929 15:22:30.698772 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea\": container with ID starting with f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea not found: ID does not exist" containerID="f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.698800 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea"} err="failed to get container status \"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea\": rpc error: code = NotFound desc = could not find container \"f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea\": container with ID starting with f882881d14f5e9eee76e5951db24895c6542980b36e898c399fcf6ce32daa7ea not found: ID does not exist"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.698816 4869 scope.go:117] "RemoveContainer" containerID="1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2"
Sep 29 15:22:30 crc kubenswrapper[4869]: E0929 15:22:30.699078 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2\": container with ID starting with 1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2 not found: ID does not exist" containerID="1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2"
Sep 29 15:22:30 crc kubenswrapper[4869]: I0929 15:22:30.699097 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2"} err="failed to get container status \"1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2\": rpc error: code = NotFound desc = could not find container \"1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2\": container with ID starting with 1f6fa64f4a97f2420b7522ec488835831e8a9979726cc9d90b98588c83952cb2 not found: ID does not exist"
Sep 29 15:22:32 crc kubenswrapper[4869]: I0929 15:22:32.255583 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="400cd919-6233-4f07-9651-081594e8c8d8" path="/var/lib/kubelet/pods/400cd919-6233-4f07-9651-081594e8c8d8/volumes"
Sep 29 15:22:40 crc kubenswrapper[4869]: I0929 15:22:40.242049 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:22:40 crc kubenswrapper[4869]: E0929 15:22:40.244115 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:22:52 crc kubenswrapper[4869]: I0929 15:22:52.241770 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:22:52 crc kubenswrapper[4869]: E0929 15:22:52.242679 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:23:04 crc kubenswrapper[4869]: I0929 15:23:04.251335 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:23:04 crc kubenswrapper[4869]: E0929 15:23:04.252160 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:23:16 crc kubenswrapper[4869]: I0929 15:23:16.242642 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:23:16 crc kubenswrapper[4869]: E0929 15:23:16.243513 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:23:30 crc kubenswrapper[4869]: I0929 15:23:30.242861 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:23:31 crc kubenswrapper[4869]: I0929 15:23:31.179247 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811"}
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.543024 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:34 crc kubenswrapper[4869]: E0929 15:23:34.544537 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="extract-content"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.544559 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="extract-content"
Sep 29 15:23:34 crc kubenswrapper[4869]: E0929 15:23:34.544630 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="extract-utilities"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.544641 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="extract-utilities"
Sep 29 15:23:34 crc kubenswrapper[4869]: E0929 15:23:34.544673 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="registry-server"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.544680 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="registry-server"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.544962 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="400cd919-6233-4f07-9651-081594e8c8d8" containerName="registry-server"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.547282 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.567143 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.659531 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.659705 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q297\" (UniqueName: \"kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.659781 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.762898 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.763013 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q297\" (UniqueName: \"kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.763068 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.763535 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.763722 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.788067 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q297\" (UniqueName: \"kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297\") pod \"redhat-operators-dzkpg\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") " pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:34 crc kubenswrapper[4869]: I0929 15:23:34.905235 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:35 crc kubenswrapper[4869]: I0929 15:23:35.466721 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:36 crc kubenswrapper[4869]: I0929 15:23:36.239845 4869 generic.go:334] "Generic (PLEG): container finished" podID="04e64653-f61f-4284-be2f-c70bac71577e" containerID="38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0" exitCode=0
Sep 29 15:23:36 crc kubenswrapper[4869]: I0929 15:23:36.239931 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerDied","Data":"38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0"}
Sep 29 15:23:36 crc kubenswrapper[4869]: I0929 15:23:36.240145 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerStarted","Data":"7b3906d3e0796ef4397ae5d703ac805085a42f95b5e5df3d0de967b486d25b45"}
Sep 29 15:23:37 crc kubenswrapper[4869]: I0929 15:23:37.253543 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerStarted","Data":"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"}
Sep 29 15:23:39 crc kubenswrapper[4869]: I0929 15:23:39.299315 4869 generic.go:334] "Generic (PLEG): container finished" podID="04e64653-f61f-4284-be2f-c70bac71577e" containerID="ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8" exitCode=0
Sep 29 15:23:39 crc kubenswrapper[4869]: I0929 15:23:39.299419 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerDied","Data":"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"}
Sep 29 15:23:41 crc kubenswrapper[4869]: I0929 15:23:41.331655 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerStarted","Data":"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"}
Sep 29 15:23:41 crc kubenswrapper[4869]: I0929 15:23:41.359407 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dzkpg" podStartSLOduration=3.301670704 podStartE2EDuration="7.359375846s" podCreationTimestamp="2025-09-29 15:23:34 +0000 UTC" firstStartedPulling="2025-09-29 15:23:36.242572484 +0000 UTC m=+6142.683216804" lastFinishedPulling="2025-09-29 15:23:40.300277626 +0000 UTC m=+6146.740921946" observedRunningTime="2025-09-29 15:23:41.349123019 +0000 UTC m=+6147.789767349" watchObservedRunningTime="2025-09-29 15:23:41.359375846 +0000 UTC m=+6147.800020166"
Sep 29 15:23:44 crc kubenswrapper[4869]: I0929 15:23:44.905985 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:44 crc kubenswrapper[4869]: I0929 15:23:44.906602 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:45 crc kubenswrapper[4869]: I0929 15:23:45.954033 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dzkpg" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="registry-server" probeResult="failure" output=<
Sep 29 15:23:45 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s
Sep 29 15:23:45 crc kubenswrapper[4869]: >
Sep 29 15:23:54 crc kubenswrapper[4869]: I0929 15:23:54.959923 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:55 crc kubenswrapper[4869]: I0929 15:23:55.015960 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:55 crc kubenswrapper[4869]: I0929 15:23:55.216432 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:56 crc kubenswrapper[4869]: I0929 15:23:56.482340 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dzkpg" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="registry-server" containerID="cri-o://f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87" gracePeriod=2
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.018390 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.048230 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content\") pod \"04e64653-f61f-4284-be2f-c70bac71577e\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") "
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.048324 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities\") pod \"04e64653-f61f-4284-be2f-c70bac71577e\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") "
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.048490 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q297\" (UniqueName: \"kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297\") pod \"04e64653-f61f-4284-be2f-c70bac71577e\" (UID: \"04e64653-f61f-4284-be2f-c70bac71577e\") "
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.049235 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities" (OuterVolumeSpecName: "utilities") pod "04e64653-f61f-4284-be2f-c70bac71577e" (UID: "04e64653-f61f-4284-be2f-c70bac71577e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.050220 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-utilities\") on node \"crc\" DevicePath \"\""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.057003 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297" (OuterVolumeSpecName: "kube-api-access-8q297") pod "04e64653-f61f-4284-be2f-c70bac71577e" (UID: "04e64653-f61f-4284-be2f-c70bac71577e"). InnerVolumeSpecName "kube-api-access-8q297". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.153560 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8q297\" (UniqueName: \"kubernetes.io/projected/04e64653-f61f-4284-be2f-c70bac71577e-kube-api-access-8q297\") on node \"crc\" DevicePath \"\""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.155023 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04e64653-f61f-4284-be2f-c70bac71577e" (UID: "04e64653-f61f-4284-be2f-c70bac71577e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.254681 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04e64653-f61f-4284-be2f-c70bac71577e-catalog-content\") on node \"crc\" DevicePath \"\""
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.494198 4869 generic.go:334] "Generic (PLEG): container finished" podID="04e64653-f61f-4284-be2f-c70bac71577e" containerID="f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87" exitCode=0
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.494248 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerDied","Data":"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"}
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.494287 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzkpg" event={"ID":"04e64653-f61f-4284-be2f-c70bac71577e","Type":"ContainerDied","Data":"7b3906d3e0796ef4397ae5d703ac805085a42f95b5e5df3d0de967b486d25b45"}
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.494311 4869 scope.go:117] "RemoveContainer" containerID="f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.494330 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzkpg"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.530786 4869 scope.go:117] "RemoveContainer" containerID="ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.548721 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.555013 4869 scope.go:117] "RemoveContainer" containerID="38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.560813 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dzkpg"]
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.611659 4869 scope.go:117] "RemoveContainer" containerID="f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"
Sep 29 15:23:57 crc kubenswrapper[4869]: E0929 15:23:57.612514 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87\": container with ID starting with f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87 not found: ID does not exist" containerID="f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.612601 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87"} err="failed to get container status \"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87\": rpc error: code = NotFound desc = could not find container \"f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87\": container with ID starting with f40082c904268848fa8c60d2610fb02814926c3260da46f54b3aa72a50b61f87 not found: ID does not exist"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.612656 4869 scope.go:117] "RemoveContainer" containerID="ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"
Sep 29 15:23:57 crc kubenswrapper[4869]: E0929 15:23:57.613102 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8\": container with ID starting with ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8 not found: ID does not exist" containerID="ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.613155 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8"} err="failed to get container status \"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8\": rpc error: code = NotFound desc = could not find container \"ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8\": container with ID starting with ffb875d136fe96edb0c7c6921d4a0da6cdc5e6fc72fff806f3dc7ccb5b6ff6e8 not found: ID does not exist"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.613192 4869 scope.go:117] "RemoveContainer" containerID="38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0"
Sep 29 15:23:57 crc kubenswrapper[4869]: E0929 15:23:57.613506 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0\": container with ID starting with 38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0 not found: ID does not exist" containerID="38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0"
Sep 29 15:23:57 crc kubenswrapper[4869]: I0929 15:23:57.613534 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0"} err="failed to get container status \"38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0\": rpc error: code = NotFound desc = could not find container \"38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0\": container with ID starting with 38fba27fda8b68d49cf53db844a505e34354455cff7a9867cc0dd8cee795f9e0 not found: ID does not exist"
Sep 29 15:23:58 crc kubenswrapper[4869]: I0929 15:23:58.253563 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04e64653-f61f-4284-be2f-c70bac71577e" path="/var/lib/kubelet/pods/04e64653-f61f-4284-be2f-c70bac71577e/volumes"
Sep 29 15:25:50 crc kubenswrapper[4869]: I0929 15:25:50.657418 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:25:50 crc kubenswrapper[4869]: I0929 15:25:50.657962 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:26:20 crc kubenswrapper[4869]: I0929 15:26:20.657516 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:26:20 crc kubenswrapper[4869]: I0929 15:26:20.659918 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:26:50 crc kubenswrapper[4869]: I0929 15:26:50.657287 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:26:50 crc kubenswrapper[4869]: I0929 15:26:50.657939 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:26:50 crc kubenswrapper[4869]: I0929 15:26:50.658000 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2"
Sep 29 15:26:50 crc kubenswrapper[4869]: I0929 15:26:50.659016 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 29 15:26:50 crc kubenswrapper[4869]: I0929 15:26:50.659116 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811" gracePeriod=600
Sep 29 15:26:51 crc kubenswrapper[4869]: I0929 15:26:51.311000 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811" exitCode=0
Sep 29 15:26:51 crc kubenswrapper[4869]: I0929 15:26:51.311196 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811"}
Sep 29 15:26:51 crc kubenswrapper[4869]: I0929 15:26:51.311520 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"}
Sep 29 15:26:51 crc kubenswrapper[4869]: I0929 15:26:51.311598 4869 scope.go:117] "RemoveContainer" containerID="8b4fed121afc5bb7fcfc158f44c553d7e301ffe7aff4fde857c6125a697f83d2"
Sep 29 15:29:20 crc kubenswrapper[4869]: I0929 15:29:20.658999 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:29:20 crc kubenswrapper[4869]: I0929 15:29:20.659526 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:29:50 crc kubenswrapper[4869]: I0929 15:29:50.657087 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:29:50 crc kubenswrapper[4869]: I0929 15:29:50.658832 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:29:52 crc kubenswrapper[4869]: I0929 15:29:52.189921 4869 generic.go:334] "Generic (PLEG): container finished" podID="3b562fc4-b928-4883-8bd5-2db40da004d0" containerID="41cb5366a81367e1f0864db0f1488e81d36d43df650c7c5c97b20434c3ff28ed" exitCode=1
Sep 29 15:29:52 crc kubenswrapper[4869]: I0929 15:29:52.190033 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"3b562fc4-b928-4883-8bd5-2db40da004d0","Type":"ContainerDied","Data":"41cb5366a81367e1f0864db0f1488e81d36d43df650c7c5c97b20434c3ff28ed"}
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.636703 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744027 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744156 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744382 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744427 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744519 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjb2p\" (UniqueName: \"kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744577 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744665 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744794 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.744837 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"3b562fc4-b928-4883-8bd5-2db40da004d0\" (UID: \"3b562fc4-b928-4883-8bd5-2db40da004d0\") "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.745178 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data" (OuterVolumeSpecName: "config-data") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.745984 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.746076 4869 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-config-data\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.752381 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.762738 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.770778 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p" (OuterVolumeSpecName: "kube-api-access-gjb2p") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "kube-api-access-gjb2p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.779918 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.780552 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.809071 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.825771 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "3b562fc4-b928-4883-8bd5-2db40da004d0" (UID: "3b562fc4-b928-4883-8bd5-2db40da004d0"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848240 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848293 4869 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848314 4869 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ca-certs\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848331 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjb2p\" (UniqueName: \"kubernetes.io/projected/3b562fc4-b928-4883-8bd5-2db40da004d0-kube-api-access-gjb2p\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848348 4869 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3b562fc4-b928-4883-8bd5-2db40da004d0-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848366 4869 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3b562fc4-b928-4883-8bd5-2db40da004d0-ssh-key\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848383 4869 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3b562fc4-b928-4883-8bd5-2db40da004d0-openstack-config\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.848469 4869 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.877389 4869 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Sep 29 15:29:53 crc kubenswrapper[4869]: I0929 15:29:53.950867 4869 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Sep 29 15:29:54 crc kubenswrapper[4869]: I0929 15:29:54.228080 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"3b562fc4-b928-4883-8bd5-2db40da004d0","Type":"ContainerDied","Data":"a42d6927b6e9f776fce725ee7d7c728f797c27b2938a6311d6b735150962ec92"}
Sep 29 15:29:54 crc kubenswrapper[4869]: I0929 15:29:54.228131 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a42d6927b6e9f776fce725ee7d7c728f797c27b2938a6311d6b735150962ec92"
Sep 29 15:29:54 crc kubenswrapper[4869]: I0929 15:29:54.228187 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.660557 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Sep 29 15:29:59 crc kubenswrapper[4869]: E0929 15:29:59.661783 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="registry-server"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.661798 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="registry-server"
Sep 29 15:29:59 crc kubenswrapper[4869]: E0929 15:29:59.661823 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="extract-content"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.661829 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="extract-content"
Sep 29 15:29:59 crc kubenswrapper[4869]: E0929 15:29:59.661847 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="extract-utilities"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.661854 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="extract-utilities"
Sep 29 15:29:59 crc kubenswrapper[4869]: E0929 15:29:59.661876 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b562fc4-b928-4883-8bd5-2db40da004d0" containerName="tempest-tests-tempest-tests-runner"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.661883 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b562fc4-b928-4883-8bd5-2db40da004d0" containerName="tempest-tests-tempest-tests-runner"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.662291 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b562fc4-b928-4883-8bd5-2db40da004d0" containerName="tempest-tests-tempest-tests-runner"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.662302 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e64653-f61f-4284-be2f-c70bac71577e" containerName="registry-server"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.663169 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.666297 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-hsqq2"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.676335 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.786666 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.786772 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whc9s\" (UniqueName: \"kubernetes.io/projected/cd829a5f-7d4e-443d-8f1c-815fc55af7b7-kube-api-access-whc9s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.889072 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.889161 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whc9s\" (UniqueName: \"kubernetes.io/projected/cd829a5f-7d4e-443d-8f1c-815fc55af7b7-kube-api-access-whc9s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.889803 4869 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.909860 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whc9s\" (UniqueName: \"kubernetes.io/projected/cd829a5f-7d4e-443d-8f1c-815fc55af7b7-kube-api-access-whc9s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.923752 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"cd829a5f-7d4e-443d-8f1c-815fc55af7b7\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:29:59 crc kubenswrapper[4869]: I0929 15:29:59.993796 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.212318 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"]
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.221064 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.224552 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"]
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.225587 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.227507 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.407486 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.407545 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.407660 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2tck\" (UniqueName: \"kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.493695 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.504399 4869 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.511012 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.511101 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.511209 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2tck\" (UniqueName: \"kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.512449 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.521696 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.531836 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2tck\" (UniqueName: \"kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck\") pod \"collect-profiles-29319330-7sddr\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:00 crc kubenswrapper[4869]: I0929 15:30:00.555823 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:01 crc kubenswrapper[4869]: W0929 15:30:01.018021 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod518046da_5baa_4a0c_af61_f3dced1e531b.slice/crio-b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d WatchSource:0}: Error finding container b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d: Status 404 returned error can't find the container with id b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d
Sep 29 15:30:01 crc kubenswrapper[4869]: I0929 15:30:01.021464 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"]
Sep 29 15:30:01 crc kubenswrapper[4869]: I0929 15:30:01.318495 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"cd829a5f-7d4e-443d-8f1c-815fc55af7b7","Type":"ContainerStarted","Data":"eb46b8c80670b836742dd9d6d0238a7dc4383b0968e70119d00053463809f203"}
Sep 29 15:30:01 crc kubenswrapper[4869]: I0929 15:30:01.320019 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" event={"ID":"518046da-5baa-4a0c-af61-f3dced1e531b","Type":"ContainerStarted","Data":"af56c3e43b11680123780e1ebd46c1268e1ea790d276f2106d77db65f24b6b2e"}
Sep 29 15:30:01 crc kubenswrapper[4869]: I0929 15:30:01.320086 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" event={"ID":"518046da-5baa-4a0c-af61-f3dced1e531b","Type":"ContainerStarted","Data":"b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d"}
Sep 29 15:30:01 crc kubenswrapper[4869]: I0929 15:30:01.360667 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" podStartSLOduration=1.360630615 podStartE2EDuration="1.360630615s" podCreationTimestamp="2025-09-29 15:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 15:30:01.335813186 +0000 UTC m=+6527.776457526" watchObservedRunningTime="2025-09-29 15:30:01.360630615 +0000 UTC m=+6527.801274935"
Sep 29 15:30:02 crc kubenswrapper[4869]: I0929 15:30:02.333086 4869 generic.go:334] "Generic (PLEG): container finished" podID="518046da-5baa-4a0c-af61-f3dced1e531b" containerID="af56c3e43b11680123780e1ebd46c1268e1ea790d276f2106d77db65f24b6b2e" exitCode=0
Sep 29 15:30:02 crc kubenswrapper[4869]: I0929 15:30:02.333147 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" event={"ID":"518046da-5baa-4a0c-af61-f3dced1e531b","Type":"ContainerDied","Data":"af56c3e43b11680123780e1ebd46c1268e1ea790d276f2106d77db65f24b6b2e"}
Sep 29 15:30:02 crc kubenswrapper[4869]: I0929 15:30:02.335943 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"cd829a5f-7d4e-443d-8f1c-815fc55af7b7","Type":"ContainerStarted","Data":"e94ecc1067cb9ba263ad4b141225d06d84d573e0b218287241d40bbb7de964b3"}
Sep 29 15:30:02 crc kubenswrapper[4869]: I0929 15:30:02.370563 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.27819912 podStartE2EDuration="3.370540087s" podCreationTimestamp="2025-09-29 15:29:59 +0000 UTC" firstStartedPulling="2025-09-29 15:30:00.504083603 +0000 UTC m=+6526.944727923" lastFinishedPulling="2025-09-29 15:30:01.59642457 +0000 UTC m=+6528.037068890" observedRunningTime="2025-09-29 15:30:02.365888656 +0000 UTC m=+6528.806532996" watchObservedRunningTime="2025-09-29 15:30:02.370540087 +0000 UTC m=+6528.811184407"
Sep 29 15:30:03 crc kubenswrapper[4869]: I0929 15:30:03.816830 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr"
Sep 29 15:30:03 crc kubenswrapper[4869]: I0929 15:30:03.995342 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume\") pod \"518046da-5baa-4a0c-af61-f3dced1e531b\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") "
Sep 29 15:30:03 crc kubenswrapper[4869]: I0929 15:30:03.995578 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2tck\" (UniqueName: \"kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck\") pod \"518046da-5baa-4a0c-af61-f3dced1e531b\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") "
Sep 29 15:30:03 crc kubenswrapper[4869]: I0929 15:30:03.995734 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume\") pod \"518046da-5baa-4a0c-af61-f3dced1e531b\" (UID: \"518046da-5baa-4a0c-af61-f3dced1e531b\") "
Sep 29 15:30:03 crc kubenswrapper[4869]: I0929 15:30:03.997550 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume" (OuterVolumeSpecName: "config-volume") pod "518046da-5baa-4a0c-af61-f3dced1e531b" (UID: "518046da-5baa-4a0c-af61-f3dced1e531b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.002375 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "518046da-5baa-4a0c-af61-f3dced1e531b" (UID: "518046da-5baa-4a0c-af61-f3dced1e531b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.017403 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck" (OuterVolumeSpecName: "kube-api-access-b2tck") pod "518046da-5baa-4a0c-af61-f3dced1e531b" (UID: "518046da-5baa-4a0c-af61-f3dced1e531b"). InnerVolumeSpecName "kube-api-access-b2tck".
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.098911 4869 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/518046da-5baa-4a0c-af61-f3dced1e531b-secret-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.098949 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2tck\" (UniqueName: \"kubernetes.io/projected/518046da-5baa-4a0c-af61-f3dced1e531b-kube-api-access-b2tck\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.098958 4869 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/518046da-5baa-4a0c-af61-f3dced1e531b-config-volume\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.374339 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" event={"ID":"518046da-5baa-4a0c-af61-f3dced1e531b","Type":"ContainerDied","Data":"b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d"} Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.374397 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8ccc1d154ea871df46ad7759fca386ba581e45174ab7ac3b219aed072ce723d" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.374494 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29319330-7sddr" Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.421454 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"] Sep 29 15:30:04 crc kubenswrapper[4869]: I0929 15:30:04.431275 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29319285-bnfdw"] Sep 29 15:30:06 crc kubenswrapper[4869]: I0929 15:30:06.257548 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce60ac7b-6cf4-49ec-b2b3-a006648e73a6" path="/var/lib/kubelet/pods/ce60ac7b-6cf4-49ec-b2b3-a006648e73a6/volumes" Sep 29 15:30:20 crc kubenswrapper[4869]: I0929 15:30:20.657040 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Sep 29 15:30:20 crc kubenswrapper[4869]: I0929 15:30:20.657594 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Sep 29 15:30:20 crc kubenswrapper[4869]: I0929 15:30:20.657688 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" Sep 29 15:30:20 crc kubenswrapper[4869]: I0929 15:30:20.658825 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"} 
pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Sep 29 15:30:20 crc kubenswrapper[4869]: I0929 15:30:20.658964 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" gracePeriod=600 Sep 29 15:30:20 crc kubenswrapper[4869]: E0929 15:30:20.794583 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:30:21 crc kubenswrapper[4869]: I0929 15:30:21.564457 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" exitCode=0 Sep 29 15:30:21 crc kubenswrapper[4869]: I0929 15:30:21.564536 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"} Sep 29 15:30:21 crc kubenswrapper[4869]: I0929 15:30:21.564625 4869 scope.go:117] "RemoveContainer" containerID="4d37bc755ca7adbdd6450afec9061d44a08adc8ad9870ff77ef253cf25835811" Sep 29 15:30:21 crc kubenswrapper[4869]: I0929 15:30:21.566202 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:30:21 crc kubenswrapper[4869]: E0929 15:30:21.566701 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.193936 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:25 crc kubenswrapper[4869]: E0929 15:30:25.198598 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="518046da-5baa-4a0c-af61-f3dced1e531b" containerName="collect-profiles" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.198654 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="518046da-5baa-4a0c-af61-f3dced1e531b" containerName="collect-profiles" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.198987 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="518046da-5baa-4a0c-af61-f3dced1e531b" containerName="collect-profiles" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.200686 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.240811 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.318907 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.319061 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54txj\" (UniqueName: \"kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.319116 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.420809 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54txj\" (UniqueName: \"kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.420970 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.421031 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.421772 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.422279 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.453031 4869 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-54txj\" (UniqueName: \"kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj\") pod \"certified-operators-x9dpx\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.542572 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:25 crc kubenswrapper[4869]: I0929 15:30:25.811050 4869 scope.go:117] "RemoveContainer" containerID="41d1f569a598631e1dc4147f542e0e95c883855e2d37a739f85663c429f8a0be" Sep 29 15:30:26 crc kubenswrapper[4869]: I0929 15:30:26.196545 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:26 crc kubenswrapper[4869]: I0929 15:30:26.661628 4869 generic.go:334] "Generic (PLEG): container finished" podID="46964d35-555d-4789-a941-1020a5154892" containerID="1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304" exitCode=0 Sep 29 15:30:26 crc kubenswrapper[4869]: I0929 15:30:26.661744 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerDied","Data":"1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304"} Sep 29 15:30:26 crc kubenswrapper[4869]: I0929 15:30:26.661981 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerStarted","Data":"f121c6d2e4147e54c41de4c759de9703190fc43adcca91e55db900d9fa78ed2c"} Sep 29 15:30:28 crc kubenswrapper[4869]: I0929 15:30:28.704676 4869 generic.go:334] "Generic (PLEG): container finished" podID="46964d35-555d-4789-a941-1020a5154892" containerID="d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb" exitCode=0 Sep 29 15:30:28 crc kubenswrapper[4869]: I0929 15:30:28.705495 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerDied","Data":"d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb"} Sep 29 15:30:29 crc kubenswrapper[4869]: I0929 15:30:29.717958 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerStarted","Data":"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774"} Sep 29 15:30:29 crc kubenswrapper[4869]: I0929 15:30:29.737385 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x9dpx" podStartSLOduration=2.133503137 podStartE2EDuration="4.737359081s" podCreationTimestamp="2025-09-29 15:30:25 +0000 UTC" firstStartedPulling="2025-09-29 15:30:26.663563313 +0000 UTC m=+6553.104207643" lastFinishedPulling="2025-09-29 15:30:29.267419257 +0000 UTC m=+6555.708063587" observedRunningTime="2025-09-29 15:30:29.732837823 +0000 UTC m=+6556.173482153" watchObservedRunningTime="2025-09-29 15:30:29.737359081 +0000 UTC m=+6556.178003401" Sep 29 15:30:34 crc kubenswrapper[4869]: I0929 15:30:34.248483 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:30:34 crc kubenswrapper[4869]: E0929 15:30:34.249246 4869 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:30:35 crc kubenswrapper[4869]: I0929 15:30:35.551586 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:35 crc kubenswrapper[4869]: I0929 15:30:35.551692 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:35 crc kubenswrapper[4869]: I0929 15:30:35.607093 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:35 crc kubenswrapper[4869]: I0929 15:30:35.833020 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:35 crc kubenswrapper[4869]: I0929 15:30:35.889649 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:37 crc kubenswrapper[4869]: I0929 15:30:37.811152 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x9dpx" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="registry-server" containerID="cri-o://6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774" gracePeriod=2 Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.352427 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.449923 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities\") pod \"46964d35-555d-4789-a941-1020a5154892\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.449978 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54txj\" (UniqueName: \"kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj\") pod \"46964d35-555d-4789-a941-1020a5154892\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.450043 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content\") pod \"46964d35-555d-4789-a941-1020a5154892\" (UID: \"46964d35-555d-4789-a941-1020a5154892\") " Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.451538 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities" (OuterVolumeSpecName: "utilities") pod "46964d35-555d-4789-a941-1020a5154892" (UID: "46964d35-555d-4789-a941-1020a5154892"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.455155 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj" (OuterVolumeSpecName: "kube-api-access-54txj") pod "46964d35-555d-4789-a941-1020a5154892" (UID: "46964d35-555d-4789-a941-1020a5154892"). InnerVolumeSpecName "kube-api-access-54txj". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.552426 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.552462 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54txj\" (UniqueName: \"kubernetes.io/projected/46964d35-555d-4789-a941-1020a5154892-kube-api-access-54txj\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.710988 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46964d35-555d-4789-a941-1020a5154892" (UID: "46964d35-555d-4789-a941-1020a5154892"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.756437 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46964d35-555d-4789-a941-1020a5154892-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.822851 4869 generic.go:334] "Generic (PLEG): container finished" podID="46964d35-555d-4789-a941-1020a5154892" containerID="6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774" exitCode=0 Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.822911 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerDied","Data":"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774"} Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.822988 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9dpx" event={"ID":"46964d35-555d-4789-a941-1020a5154892","Type":"ContainerDied","Data":"f121c6d2e4147e54c41de4c759de9703190fc43adcca91e55db900d9fa78ed2c"} Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.823015 4869 scope.go:117] "RemoveContainer" containerID="6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.823094 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9dpx" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.862091 4869 scope.go:117] "RemoveContainer" containerID="d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.890577 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.892309 4869 scope.go:117] "RemoveContainer" containerID="1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.901004 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x9dpx"] Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.941367 4869 scope.go:117] "RemoveContainer" containerID="6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774" Sep 29 15:30:38 crc kubenswrapper[4869]: E0929 15:30:38.942057 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774\": container with ID starting with 6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774 not found: ID does not exist" containerID="6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.942125 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774"} err="failed to get container status \"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774\": rpc error: code = NotFound desc = could not find container \"6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774\": container with ID starting with 6e641d46e1eafc365fd5405d90d225ef038fec78aa8f39b4898cc558da1be774 not found: ID does not exist" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.942180 4869 scope.go:117] "RemoveContainer" containerID="d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb" Sep 29 15:30:38 crc kubenswrapper[4869]: E0929 15:30:38.942585 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb\": container with ID starting with d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb not found: ID does not exist" containerID="d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.942680 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb"} err="failed to get container status \"d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb\": rpc error: code = NotFound desc = could not find container \"d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb\": container with ID starting with d0d16626f23dd2665bd19358d68eafe78fca49a8ca43bc4ee7244d7a270c8dbb not found: ID does not exist" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.942720 4869 scope.go:117] "RemoveContainer" containerID="1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304" Sep 29 15:30:38 crc kubenswrapper[4869]: E0929 15:30:38.943083 4869 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304\": container with ID starting with 1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304 not found: ID does not exist" containerID="1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304" Sep 29 15:30:38 crc kubenswrapper[4869]: I0929 15:30:38.943145 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304"} err="failed to get container status \"1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304\": rpc error: code = NotFound desc = could not find container \"1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304\": container with ID starting with 1132b3789ac3285bbc5810ccbab7c8364ee3d961c18c6f849ce3c74898d97304 not found: ID does not exist" Sep 29 15:30:40 crc kubenswrapper[4869]: I0929 15:30:40.259930 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46964d35-555d-4789-a941-1020a5154892" path="/var/lib/kubelet/pods/46964d35-555d-4789-a941-1020a5154892/volumes" Sep 29 15:30:49 crc kubenswrapper[4869]: I0929 15:30:49.242557 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:30:49 crc kubenswrapper[4869]: E0929 15:30:49.243388 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.139956 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rnl6/must-gather-n27l6"] Sep 29 15:30:54 crc kubenswrapper[4869]: E0929 15:30:54.141043 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="extract-utilities" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.141058 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="extract-utilities" Sep 29 15:30:54 crc kubenswrapper[4869]: E0929 15:30:54.141073 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="registry-server" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.141079 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="registry-server" Sep 29 15:30:54 crc kubenswrapper[4869]: E0929 15:30:54.141097 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="extract-content" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.141103 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="extract-content" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.141316 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="46964d35-555d-4789-a941-1020a5154892" containerName="registry-server" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.142526 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.147993 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6rnl6"/"openshift-service-ca.crt" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.148107 4869 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6rnl6"/"kube-root-ca.crt" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.148005 4869 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6rnl6"/"default-dockercfg-cbxnd" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.163918 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6rnl6/must-gather-n27l6"] Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.222967 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.223144 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdmhz\" (UniqueName: \"kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.325092 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.325214 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdmhz\" (UniqueName: \"kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.325841 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.368190 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdmhz\" (UniqueName: \"kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz\") pod \"must-gather-n27l6\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") " pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:54 crc kubenswrapper[4869]: I0929 15:30:54.508876 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/must-gather-n27l6" Sep 29 15:30:55 crc kubenswrapper[4869]: I0929 15:30:55.053065 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6rnl6/must-gather-n27l6"] Sep 29 15:30:56 crc kubenswrapper[4869]: I0929 15:30:56.011113 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/must-gather-n27l6" event={"ID":"261b922e-a850-4402-b06c-c40aa26e5a67","Type":"ContainerStarted","Data":"9880597943d3dcf109217cb2204d55f444ef252dfc7b9f0b1d70327e81c51b37"} Sep 29 15:31:03 crc kubenswrapper[4869]: I0929 15:31:03.090795 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/must-gather-n27l6" event={"ID":"261b922e-a850-4402-b06c-c40aa26e5a67","Type":"ContainerStarted","Data":"501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69"} Sep 29 15:31:03 crc kubenswrapper[4869]: I0929 15:31:03.091238 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/must-gather-n27l6" event={"ID":"261b922e-a850-4402-b06c-c40aa26e5a67","Type":"ContainerStarted","Data":"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"} Sep 29 15:31:03 crc kubenswrapper[4869]: I0929 15:31:03.113544 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6rnl6/must-gather-n27l6" podStartSLOduration=1.857737405 podStartE2EDuration="9.113512343s" podCreationTimestamp="2025-09-29 15:30:54 +0000 UTC" firstStartedPulling="2025-09-29 15:30:55.047937785 +0000 UTC m=+6581.488582105" lastFinishedPulling="2025-09-29 15:31:02.303712723 +0000 UTC m=+6588.744357043" observedRunningTime="2025-09-29 15:31:03.106008887 +0000 UTC m=+6589.546653217" watchObservedRunningTime="2025-09-29 15:31:03.113512343 +0000 UTC m=+6589.554156663" Sep 29 15:31:04 crc kubenswrapper[4869]: I0929 15:31:04.249052 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:31:04 crc kubenswrapper[4869]: E0929 15:31:04.249639 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:31:05 crc kubenswrapper[4869]: E0929 15:31:05.862087 4869 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:35560->38.102.83.80:45233: write tcp 38.102.83.80:35560->38.102.83.80:45233: write: broken pipe Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.451641 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-vzm98"] Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.454871 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.582850 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.583327 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9xxf\" (UniqueName: \"kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.685802 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.685944 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9xxf\" (UniqueName: \"kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.685943 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.709369 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9xxf\" (UniqueName: \"kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf\") pod \"crc-debug-vzm98\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:07 crc kubenswrapper[4869]: I0929 15:31:07.787182 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:31:08 crc kubenswrapper[4869]: I0929 15:31:08.165934 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" event={"ID":"2480b819-60ba-41c7-ae50-acbe42716e49","Type":"ContainerStarted","Data":"0cd30fbd596280149528cbae9c5497b9921aac7cb8294c7aca5bcdf3df338b07"} Sep 29 15:31:17 crc kubenswrapper[4869]: I0929 15:31:17.242805 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:31:17 crc kubenswrapper[4869]: E0929 15:31:17.243683 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:31:19 crc kubenswrapper[4869]: I0929 15:31:19.290768 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" event={"ID":"2480b819-60ba-41c7-ae50-acbe42716e49","Type":"ContainerStarted","Data":"c3f16e6cc30b88b4f7816bd834da18d2a029ad2df59e36426bfa9c28822bf21c"} Sep 29 15:31:19 crc kubenswrapper[4869]: I0929 15:31:19.315012 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" podStartSLOduration=1.33730952 podStartE2EDuration="12.314986319s" podCreationTimestamp="2025-09-29 15:31:07 +0000 UTC" firstStartedPulling="2025-09-29 15:31:07.854653101 +0000 UTC m=+6594.295297421" lastFinishedPulling="2025-09-29 15:31:18.83232991 +0000 UTC m=+6605.272974220" observedRunningTime="2025-09-29 15:31:19.302932534 +0000 UTC m=+6605.743576854" watchObservedRunningTime="2025-09-29 15:31:19.314986319 +0000 UTC m=+6605.755630639" Sep 29 15:31:28 crc kubenswrapper[4869]: I0929 15:31:28.241910 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:31:28 crc kubenswrapper[4869]: E0929 15:31:28.242825 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:31:41 crc kubenswrapper[4869]: I0929 15:31:41.242375 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:31:41 crc kubenswrapper[4869]: E0929 15:31:41.243172 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:31:52 crc kubenswrapper[4869]: I0929 15:31:52.243512 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 
15:31:52 crc kubenswrapper[4869]: E0929 15:31:52.244410 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:03 crc kubenswrapper[4869]: I0929 15:32:03.241770 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:32:03 crc kubenswrapper[4869]: E0929 15:32:03.242515 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:16 crc kubenswrapper[4869]: I0929 15:32:16.242491 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:32:16 crc kubenswrapper[4869]: E0929 15:32:16.243317 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:30 crc kubenswrapper[4869]: I0929 15:32:30.242925 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:32:30 crc kubenswrapper[4869]: E0929 15:32:30.243840 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:41 crc kubenswrapper[4869]: I0929 15:32:41.311044 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-645c66bc68-xrb58_d8d34c4e-9d7e-4026-bb9f-0e356149a209/barbican-api/0.log" Sep 29 15:32:41 crc kubenswrapper[4869]: I0929 15:32:41.535416 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-645c66bc68-xrb58_d8d34c4e-9d7e-4026-bb9f-0e356149a209/barbican-api-log/0.log" Sep 29 15:32:41 crc kubenswrapper[4869]: I0929 15:32:41.720541 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-688784644b-5fblt_63bb7554-9ece-4e74-a823-2d1e4489e72c/barbican-keystone-listener/0.log" Sep 29 15:32:41 crc kubenswrapper[4869]: I0929 15:32:41.806446 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-688784644b-5fblt_63bb7554-9ece-4e74-a823-2d1e4489e72c/barbican-keystone-listener-log/0.log" Sep 29 15:32:41 crc kubenswrapper[4869]: I0929 15:32:41.981739 4869 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_barbican-worker-68bc68cf7f-ms9sv_628780b7-eee4-4590-b398-ae564ac773a0/barbican-worker/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.141397 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-68bc68cf7f-ms9sv_628780b7-eee4-4590-b398-ae564ac773a0/barbican-worker-log/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.375781 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-9fz67_cfae165c-93c0-48bb-8106-ec1f4f85ce17/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.625245 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a5bafc2b-0923-47c0-845b-6470702822ec/ceilometer-central-agent/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.717966 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a5bafc2b-0923-47c0-845b-6470702822ec/ceilometer-notification-agent/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.841847 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a5bafc2b-0923-47c0-845b-6470702822ec/proxy-httpd/0.log" Sep 29 15:32:42 crc kubenswrapper[4869]: I0929 15:32:42.965593 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a5bafc2b-0923-47c0-845b-6470702822ec/sg-core/0.log" Sep 29 15:32:43 crc kubenswrapper[4869]: I0929 15:32:43.259321 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-c6vhw_a4536f8d-b2bb-47d1-ac50-093a51f12f65/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:43 crc kubenswrapper[4869]: I0929 15:32:43.479145 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-p46sv_975110b8-8030-4279-9b45-03a9523a9012/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:43 crc kubenswrapper[4869]: I0929 15:32:43.704691 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_627d82b1-f6ec-45d5-a3ca-c633f22f076d/cinder-api-log/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.160952 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_404cac7d-3af0-4f4f-bdd8-fc3eec4b512a/probe/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.332595 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_627d82b1-f6ec-45d5-a3ca-c633f22f076d/cinder-api/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.395670 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_404cac7d-3af0-4f4f-bdd8-fc3eec4b512a/cinder-backup/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.542343 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_31cbeb64-7a21-4885-b8ad-9fa5b42508f9/cinder-scheduler/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.663662 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_31cbeb64-7a21-4885-b8ad-9fa5b42508f9/probe/0.log" Sep 29 15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.881475 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_482223e1-1a82-46c5-808d-ac8f963c7c09/cinder-volume/0.log" Sep 29 
15:32:44 crc kubenswrapper[4869]: I0929 15:32:44.913551 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_482223e1-1a82-46c5-808d-ac8f963c7c09/probe/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.157300 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume2-0_9dafc73c-4956-4f69-92c1-e9bb3957e8fe/probe/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.176078 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume2-0_9dafc73c-4956-4f69-92c1-e9bb3957e8fe/cinder-volume/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.242710 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:32:45 crc kubenswrapper[4869]: E0929 15:32:45.243008 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.369992 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-plkll_b4aef40c-231d-4e13-a0b7-c8e65c69ce91/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.437289 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-sqm8j_5705d408-f08f-47cf-b786-733c2f6f55e2/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.610807 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cdbf8bf5-l6hdp_9ab22b18-ab47-45aa-8967-fec232b92cbb/init/0.log" Sep 29 15:32:45 crc kubenswrapper[4869]: I0929 15:32:45.838379 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cdbf8bf5-l6hdp_9ab22b18-ab47-45aa-8967-fec232b92cbb/init/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.085108 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_d6930ce7-8080-4396-8b97-92edef318edf/glance-httpd/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.120571 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_d6930ce7-8080-4396-8b97-92edef318edf/glance-log/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.410926 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_07fe876e-5a7e-48ca-b91b-44c5fc9129b2/glance-httpd/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.550044 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_07fe876e-5a7e-48ca-b91b-44c5fc9129b2/glance-log/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.644117 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cdbf8bf5-l6hdp_9ab22b18-ab47-45aa-8967-fec232b92cbb/dnsmasq-dns/0.log" Sep 29 15:32:46 crc kubenswrapper[4869]: I0929 15:32:46.861830 4869 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-74f556cb8d-4pwqg_696c6bef-f1c0-4d67-9ca8-ccb6bb489141/horizon/0.log" Sep 29 15:32:47 crc kubenswrapper[4869]: I0929 15:32:47.196370 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-htp7b_9f5b7d60-1238-45f5-a03f-28ede1b33ce0/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:47 crc kubenswrapper[4869]: I0929 15:32:47.424889 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-74f556cb8d-4pwqg_696c6bef-f1c0-4d67-9ca8-ccb6bb489141/horizon-log/0.log" Sep 29 15:32:47 crc kubenswrapper[4869]: I0929 15:32:47.462363 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-v2crd_c2d2928f-faba-4b25-90fb-dcaa88a3b515/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:47 crc kubenswrapper[4869]: I0929 15:32:47.786537 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29319301-ml6vq_7ae04f1c-5944-43ce-a66d-2e04f96cc301/keystone-cron/0.log" Sep 29 15:32:48 crc kubenswrapper[4869]: I0929 15:32:48.031909 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a11f831b-4af3-43cd-a0d0-0499b3e5e084/kube-state-metrics/0.log" Sep 29 15:32:48 crc kubenswrapper[4869]: I0929 15:32:48.261755 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5b99b5b7f7-z8lvm_cef8fcc7-7d06-439c-8d41-1948f9fbda1b/keystone-api/0.log" Sep 29 15:32:48 crc kubenswrapper[4869]: I0929 15:32:48.311321 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-gmhsm_03e348b9-33ba-41f9-ac42-792fd12e4e7c/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:49 crc kubenswrapper[4869]: I0929 15:32:49.086806 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-984cbc747-hdvk6_01428588-d0f3-4d76-b537-2daec9cefe31/neutron-httpd/0.log" Sep 29 15:32:49 crc kubenswrapper[4869]: I0929 15:32:49.144720 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-984cbc747-hdvk6_01428588-d0f3-4d76-b537-2daec9cefe31/neutron-api/0.log" Sep 29 15:32:49 crc kubenswrapper[4869]: I0929 15:32:49.422634 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-l666f_9edd7e05-abde-4406-af59-37f1ea0e8b73/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:50 crc kubenswrapper[4869]: I0929 15:32:50.877063 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_f980558d-d08e-40fc-a8f7-88a3b88f2b56/nova-cell0-conductor-conductor/0.log" Sep 29 15:32:52 crc kubenswrapper[4869]: I0929 15:32:52.039250 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_910de1ee-2bba-48cc-bd43-235cec60e09d/nova-cell1-conductor-conductor/0.log" Sep 29 15:32:52 crc kubenswrapper[4869]: I0929 15:32:52.180379 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_71dcb465-3ed7-4c66-a3df-0ab996e7c726/nova-api-log/0.log" Sep 29 15:32:52 crc kubenswrapper[4869]: I0929 15:32:52.497739 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_71dcb465-3ed7-4c66-a3df-0ab996e7c726/nova-api-api/0.log" Sep 29 15:32:52 crc kubenswrapper[4869]: I0929 15:32:52.551093 4869 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-novncproxy-0_5da9a418-1287-499a-91b7-a2bbed6a18ef/nova-cell1-novncproxy-novncproxy/0.log" Sep 29 15:32:52 crc kubenswrapper[4869]: I0929 15:32:52.959397 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-scm4r_2a0f54ec-6749-43a1-81d5-064a45a1d715/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:53 crc kubenswrapper[4869]: I0929 15:32:53.047968 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_0c518901-c1c9-4b38-9f36-c65428e1937b/nova-metadata-log/0.log" Sep 29 15:32:53 crc kubenswrapper[4869]: I0929 15:32:53.643038 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3b25f43-ab33-4712-ac4c-70cf4bba6ce2/mysql-bootstrap/0.log" Sep 29 15:32:53 crc kubenswrapper[4869]: I0929 15:32:53.648270 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_c4815a64-2248-4f94-b671-d62ef921dd68/nova-scheduler-scheduler/0.log" Sep 29 15:32:53 crc kubenswrapper[4869]: I0929 15:32:53.943120 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3b25f43-ab33-4712-ac4c-70cf4bba6ce2/galera/0.log" Sep 29 15:32:53 crc kubenswrapper[4869]: I0929 15:32:53.991438 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3b25f43-ab33-4712-ac4c-70cf4bba6ce2/mysql-bootstrap/0.log" Sep 29 15:32:54 crc kubenswrapper[4869]: I0929 15:32:54.308643 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_12b51255-a7f5-4295-9367-e8198b8a5c55/mysql-bootstrap/0.log" Sep 29 15:32:54 crc kubenswrapper[4869]: I0929 15:32:54.503812 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_12b51255-a7f5-4295-9367-e8198b8a5c55/mysql-bootstrap/0.log" Sep 29 15:32:54 crc kubenswrapper[4869]: I0929 15:32:54.640088 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_12b51255-a7f5-4295-9367-e8198b8a5c55/galera/0.log" Sep 29 15:32:54 crc kubenswrapper[4869]: I0929 15:32:54.869725 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_f3799b3a-38c7-44fc-8f60-1fd75e2dd751/openstackclient/0.log" Sep 29 15:32:55 crc kubenswrapper[4869]: I0929 15:32:55.089618 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-jhwdk_6dbad8f0-0816-40ae-b0b4-d6602f352641/ovn-controller/0.log" Sep 29 15:32:55 crc kubenswrapper[4869]: I0929 15:32:55.311713 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wgzgq_6489e730-0811-4b9e-a82c-a36987e0db21/openstack-network-exporter/0.log" Sep 29 15:32:55 crc kubenswrapper[4869]: I0929 15:32:55.598506 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4w2zq_c1f79c03-e945-4593-843b-9d9c5f893970/ovsdb-server-init/0.log" Sep 29 15:32:55 crc kubenswrapper[4869]: I0929 15:32:55.993571 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4w2zq_c1f79c03-e945-4593-843b-9d9c5f893970/ovsdb-server-init/0.log" Sep 29 15:32:56 crc kubenswrapper[4869]: I0929 15:32:56.257929 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4w2zq_c1f79c03-e945-4593-843b-9d9c5f893970/ovsdb-server/0.log" Sep 29 15:32:56 crc kubenswrapper[4869]: I0929 15:32:56.399677 
4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4w2zq_c1f79c03-e945-4593-843b-9d9c5f893970/ovs-vswitchd/0.log" Sep 29 15:32:56 crc kubenswrapper[4869]: I0929 15:32:56.410394 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_0c518901-c1c9-4b38-9f36-c65428e1937b/nova-metadata-metadata/0.log" Sep 29 15:32:56 crc kubenswrapper[4869]: I0929 15:32:56.750077 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-hgg68_694027a0-4ac3-49a7-ab93-7022d098b091/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:32:56 crc kubenswrapper[4869]: I0929 15:32:56.964308 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1c485924-251a-4fdb-9aef-d32332da2662/openstack-network-exporter/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.071111 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1c485924-251a-4fdb-9aef-d32332da2662/ovn-northd/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.207714 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9a91d438-85a7-4b76-ac91-509403d09f26/openstack-network-exporter/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.349574 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9a91d438-85a7-4b76-ac91-509403d09f26/ovsdbserver-nb/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.462411 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_39ea57f0-7600-4ca4-912d-c429465aca86/openstack-network-exporter/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.557009 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_39ea57f0-7600-4ca4-912d-c429465aca86/ovsdbserver-sb/0.log" Sep 29 15:32:57 crc kubenswrapper[4869]: I0929 15:32:57.953807 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5cbb465496-fhs6h_718a89d5-d692-49e1-8e66-c647ca06125e/placement-api/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.123139 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5cbb465496-fhs6h_718a89d5-d692-49e1-8e66-c647ca06125e/placement-log/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.257937 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_26e88dc5-69ae-4a63-b9ff-b81d4bc78079/init-config-reloader/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.491133 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_26e88dc5-69ae-4a63-b9ff-b81d4bc78079/prometheus/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.517320 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_26e88dc5-69ae-4a63-b9ff-b81d4bc78079/init-config-reloader/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.529916 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_26e88dc5-69ae-4a63-b9ff-b81d4bc78079/config-reloader/0.log" Sep 29 15:32:58 crc kubenswrapper[4869]: I0929 15:32:58.746369 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3b99cc79-2a53-4c36-ba65-c45598593017/setup-container/0.log" Sep 29 15:32:58 crc 
kubenswrapper[4869]: I0929 15:32:58.753374 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_26e88dc5-69ae-4a63-b9ff-b81d4bc78079/thanos-sidecar/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.049769 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3b99cc79-2a53-4c36-ba65-c45598593017/setup-container/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.156229 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3b99cc79-2a53-4c36-ba65-c45598593017/rabbitmq/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.242874 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:32:59 crc kubenswrapper[4869]: E0929 15:32:59.243362 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.291911 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_0b77ed89-e796-4138-ae2c-fcd5f2125233/setup-container/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.513406 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_0b77ed89-e796-4138-ae2c-fcd5f2125233/setup-container/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.523116 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_0b77ed89-e796-4138-ae2c-fcd5f2125233/rabbitmq/0.log" Sep 29 15:32:59 crc kubenswrapper[4869]: I0929 15:32:59.772058 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_3cce7bdd-3959-48a5-a69b-fdf27672879a/setup-container/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.001524 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_3cce7bdd-3959-48a5-a69b-fdf27672879a/rabbitmq/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.035154 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_3cce7bdd-3959-48a5-a69b-fdf27672879a/setup-container/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.282787 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-v8wkg_aee823b2-67f9-4014-8015-f9094f636f20/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.344160 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-znqm4_aaba9b54-27f6-4b48-a3fb-63ed25d6b93f/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.542768 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-v56q5_6e87d90c-73d6-4e83-87b9-09dc55557160/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:33:00 crc kubenswrapper[4869]: I0929 15:33:00.809908 4869 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-gv9vj_bebe6640-f31f-4487-9e19-8a829719abfd/ssh-known-hosts-edpm-deployment/0.log" Sep 29 15:33:01 crc kubenswrapper[4869]: I0929 15:33:01.000473 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-7vx66_7417019a-14a4-45f5-99d1-b7b84efb665e/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:33:01 crc kubenswrapper[4869]: I0929 15:33:01.331295 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_cd829a5f-7d4e-443d-8f1c-815fc55af7b7/test-operator-logs-container/0.log" Sep 29 15:33:01 crc kubenswrapper[4869]: I0929 15:33:01.527463 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_3b562fc4-b928-4883-8bd5-2db40da004d0/tempest-tests-tempest-tests-runner/0.log" Sep 29 15:33:01 crc kubenswrapper[4869]: I0929 15:33:01.593120 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-chn5f_e5e5263f-9a85-4a85-bfb5-20dea2039fad/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Sep 29 15:33:02 crc kubenswrapper[4869]: I0929 15:33:02.907741 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_2bccd1d9-e5f1-4608-ac93-e364aac95e6c/watcher-applier/0.log" Sep 29 15:33:05 crc kubenswrapper[4869]: I0929 15:33:05.783682 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_95555888-1c52-4e91-ac6c-b85c38094784/watcher-api-log/0.log" Sep 29 15:33:06 crc kubenswrapper[4869]: I0929 15:33:06.120734 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_103100a6-0dbb-481c-ba0e-4e7a2e5c38f6/watcher-decision-engine/2.log" Sep 29 15:33:10 crc kubenswrapper[4869]: I0929 15:33:10.243405 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:33:10 crc kubenswrapper[4869]: E0929 15:33:10.244371 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:33:10 crc kubenswrapper[4869]: I0929 15:33:10.411058 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_103100a6-0dbb-481c-ba0e-4e7a2e5c38f6/watcher-decision-engine/3.log" Sep 29 15:33:11 crc kubenswrapper[4869]: I0929 15:33:11.985004 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_95555888-1c52-4e91-ac6c-b85c38094784/watcher-api/0.log" Sep 29 15:33:17 crc kubenswrapper[4869]: I0929 15:33:17.102112 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6168192e-6336-436d-9883-f9608ade43dc/memcached/0.log" Sep 29 15:33:21 crc kubenswrapper[4869]: I0929 15:33:21.242519 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:33:21 crc kubenswrapper[4869]: E0929 15:33:21.243324 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.547843 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.555994 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.559198 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.559311 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4clqh\" (UniqueName: \"kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.559577 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.565678 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.662181 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4clqh\" (UniqueName: \"kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.662273 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.662397 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.663101 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.665381 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.689636 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4clqh\" (UniqueName: \"kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh\") pod \"community-operators-x9qlr\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:29 crc kubenswrapper[4869]: I0929 15:33:29.892130 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:30 crc kubenswrapper[4869]: I0929 15:33:30.498443 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:30 crc kubenswrapper[4869]: I0929 15:33:30.730462 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerStarted","Data":"086a6d29c60e85ab95e73ced66c3299dea022ae6c2bbd6d738833f2ea0b0ac3c"} Sep 29 15:33:30 crc kubenswrapper[4869]: I0929 15:33:30.730515 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerStarted","Data":"5e312a09a8b1cf412d077689cf4b1c9a2bbf6b606dc8e9979bff2594bfd881e2"} Sep 29 15:33:31 crc kubenswrapper[4869]: I0929 15:33:31.744067 4869 generic.go:334] "Generic (PLEG): container finished" podID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerID="086a6d29c60e85ab95e73ced66c3299dea022ae6c2bbd6d738833f2ea0b0ac3c" exitCode=0 Sep 29 15:33:31 crc kubenswrapper[4869]: I0929 15:33:31.744191 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerDied","Data":"086a6d29c60e85ab95e73ced66c3299dea022ae6c2bbd6d738833f2ea0b0ac3c"} Sep 29 15:33:33 crc kubenswrapper[4869]: I0929 15:33:33.243329 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:33:33 crc kubenswrapper[4869]: E0929 15:33:33.243991 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:33:33 crc kubenswrapper[4869]: I0929 15:33:33.766793 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" 
event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerStarted","Data":"79669e812babc872aadb0e1bf465fc803e77bef8a6255e7fe6d25c307078cad0"} Sep 29 15:33:34 crc kubenswrapper[4869]: I0929 15:33:34.780533 4869 generic.go:334] "Generic (PLEG): container finished" podID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerID="79669e812babc872aadb0e1bf465fc803e77bef8a6255e7fe6d25c307078cad0" exitCode=0 Sep 29 15:33:34 crc kubenswrapper[4869]: I0929 15:33:34.780630 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerDied","Data":"79669e812babc872aadb0e1bf465fc803e77bef8a6255e7fe6d25c307078cad0"} Sep 29 15:33:35 crc kubenswrapper[4869]: I0929 15:33:35.792221 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerStarted","Data":"9d8de698c3a197490e90fc0a0d708230ff66e98ece458da9f0c7116309df1e3d"} Sep 29 15:33:35 crc kubenswrapper[4869]: I0929 15:33:35.819265 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x9qlr" podStartSLOduration=3.122026848 podStartE2EDuration="6.819234765s" podCreationTimestamp="2025-09-29 15:33:29 +0000 UTC" firstStartedPulling="2025-09-29 15:33:31.746598345 +0000 UTC m=+6738.187242665" lastFinishedPulling="2025-09-29 15:33:35.443806262 +0000 UTC m=+6741.884450582" observedRunningTime="2025-09-29 15:33:35.818914687 +0000 UTC m=+6742.259559027" watchObservedRunningTime="2025-09-29 15:33:35.819234765 +0000 UTC m=+6742.259879085" Sep 29 15:33:39 crc kubenswrapper[4869]: I0929 15:33:39.892265 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:39 crc kubenswrapper[4869]: I0929 15:33:39.892964 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:39 crc kubenswrapper[4869]: I0929 15:33:39.945794 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:40 crc kubenswrapper[4869]: I0929 15:33:40.896119 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.315900 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.318769 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.331396 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.447713 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9h2q\" (UniqueName: \"kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.447786 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.447976 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.500992 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.503357 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.527443 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.550847 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.550918 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.551077 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9h2q\" (UniqueName: \"kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.551480 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " 
pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.551734 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.573458 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9h2q\" (UniqueName: \"kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q\") pod \"redhat-marketplace-ktw8x\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.650068 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.653771 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.654561 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4dgc\" (UniqueName: \"kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.654640 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.757057 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4dgc\" (UniqueName: \"kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.757121 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.757213 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.757850 4869 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.757910 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.777775 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4dgc\" (UniqueName: \"kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc\") pod \"redhat-operators-lxbfv\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:43 crc kubenswrapper[4869]: I0929 15:33:43.834249 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:44 crc kubenswrapper[4869]: W0929 15:33:44.292755 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode44f255e_26d9_47d5_a313_4c87e6a80ddb.slice/crio-75d072ca365e40d6460fcf8cf6031c01162e318e84c580af2dbd4c87047df22e WatchSource:0}: Error finding container 75d072ca365e40d6460fcf8cf6031c01162e318e84c580af2dbd4c87047df22e: Status 404 returned error can't find the container with id 75d072ca365e40d6460fcf8cf6031c01162e318e84c580af2dbd4c87047df22e Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.299799 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.461803 4869 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.894772 4869 generic.go:334] "Generic (PLEG): container finished" podID="8556b1ac-992f-409f-95d7-66299f092c8a" containerID="36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16" exitCode=0 Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.894872 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerDied","Data":"36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16"} Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.895123 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerStarted","Data":"291b07081647c8c3c3c671c4b9fea3da8e16573182d235cf80b472d9b9fdfbf1"} Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.897645 4869 generic.go:334] "Generic (PLEG): container finished" podID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerID="aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386" exitCode=0 Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.897703 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" 
event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerDied","Data":"aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386"} Sep 29 15:33:44 crc kubenswrapper[4869]: I0929 15:33:44.897736 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerStarted","Data":"75d072ca365e40d6460fcf8cf6031c01162e318e84c580af2dbd4c87047df22e"} Sep 29 15:33:45 crc kubenswrapper[4869]: I0929 15:33:45.911137 4869 generic.go:334] "Generic (PLEG): container finished" podID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerID="4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592" exitCode=0 Sep 29 15:33:45 crc kubenswrapper[4869]: I0929 15:33:45.911193 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerDied","Data":"4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592"} Sep 29 15:33:46 crc kubenswrapper[4869]: I0929 15:33:46.924587 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerStarted","Data":"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9"} Sep 29 15:33:46 crc kubenswrapper[4869]: I0929 15:33:46.927487 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerStarted","Data":"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4"} Sep 29 15:33:46 crc kubenswrapper[4869]: I0929 15:33:46.975734 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ktw8x" podStartSLOduration=2.561169718 podStartE2EDuration="3.975705249s" podCreationTimestamp="2025-09-29 15:33:43 +0000 UTC" firstStartedPulling="2025-09-29 15:33:44.899585323 +0000 UTC m=+6751.340229643" lastFinishedPulling="2025-09-29 15:33:46.314120864 +0000 UTC m=+6752.754765174" observedRunningTime="2025-09-29 15:33:46.968124331 +0000 UTC m=+6753.408768651" watchObservedRunningTime="2025-09-29 15:33:46.975705249 +0000 UTC m=+6753.416349559" Sep 29 15:33:47 crc kubenswrapper[4869]: I0929 15:33:47.242843 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:33:47 crc kubenswrapper[4869]: E0929 15:33:47.243264 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:33:48 crc kubenswrapper[4869]: I0929 15:33:48.295974 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:48 crc kubenswrapper[4869]: I0929 15:33:48.296496 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x9qlr" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="registry-server" containerID="cri-o://9d8de698c3a197490e90fc0a0d708230ff66e98ece458da9f0c7116309df1e3d" gracePeriod=2 Sep 29 
15:33:48 crc kubenswrapper[4869]: I0929 15:33:48.953990 4869 generic.go:334] "Generic (PLEG): container finished" podID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerID="9d8de698c3a197490e90fc0a0d708230ff66e98ece458da9f0c7116309df1e3d" exitCode=0 Sep 29 15:33:48 crc kubenswrapper[4869]: I0929 15:33:48.954056 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerDied","Data":"9d8de698c3a197490e90fc0a0d708230ff66e98ece458da9f0c7116309df1e3d"} Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.354315 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.529269 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities\") pod \"8daa1315-5240-47a5-b3aa-d42b61387c39\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.529442 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content\") pod \"8daa1315-5240-47a5-b3aa-d42b61387c39\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.529564 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4clqh\" (UniqueName: \"kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh\") pod \"8daa1315-5240-47a5-b3aa-d42b61387c39\" (UID: \"8daa1315-5240-47a5-b3aa-d42b61387c39\") " Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.530096 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities" (OuterVolumeSpecName: "utilities") pod "8daa1315-5240-47a5-b3aa-d42b61387c39" (UID: "8daa1315-5240-47a5-b3aa-d42b61387c39"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.530535 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.560021 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh" (OuterVolumeSpecName: "kube-api-access-4clqh") pod "8daa1315-5240-47a5-b3aa-d42b61387c39" (UID: "8daa1315-5240-47a5-b3aa-d42b61387c39"). InnerVolumeSpecName "kube-api-access-4clqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.572574 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8daa1315-5240-47a5-b3aa-d42b61387c39" (UID: "8daa1315-5240-47a5-b3aa-d42b61387c39"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.631912 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8daa1315-5240-47a5-b3aa-d42b61387c39-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.632453 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4clqh\" (UniqueName: \"kubernetes.io/projected/8daa1315-5240-47a5-b3aa-d42b61387c39-kube-api-access-4clqh\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.972369 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9qlr" event={"ID":"8daa1315-5240-47a5-b3aa-d42b61387c39","Type":"ContainerDied","Data":"5e312a09a8b1cf412d077689cf4b1c9a2bbf6b606dc8e9979bff2594bfd881e2"} Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.972426 4869 scope.go:117] "RemoveContainer" containerID="9d8de698c3a197490e90fc0a0d708230ff66e98ece458da9f0c7116309df1e3d" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.972588 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9qlr" Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.979952 4869 generic.go:334] "Generic (PLEG): container finished" podID="8556b1ac-992f-409f-95d7-66299f092c8a" containerID="0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9" exitCode=0 Sep 29 15:33:49 crc kubenswrapper[4869]: I0929 15:33:49.980023 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerDied","Data":"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9"} Sep 29 15:33:50 crc kubenswrapper[4869]: I0929 15:33:50.003750 4869 scope.go:117] "RemoveContainer" containerID="79669e812babc872aadb0e1bf465fc803e77bef8a6255e7fe6d25c307078cad0" Sep 29 15:33:50 crc kubenswrapper[4869]: I0929 15:33:50.032138 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:50 crc kubenswrapper[4869]: I0929 15:33:50.034234 4869 scope.go:117] "RemoveContainer" containerID="086a6d29c60e85ab95e73ced66c3299dea022ae6c2bbd6d738833f2ea0b0ac3c" Sep 29 15:33:50 crc kubenswrapper[4869]: I0929 15:33:50.042780 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x9qlr"] Sep 29 15:33:50 crc kubenswrapper[4869]: I0929 15:33:50.254937 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" path="/var/lib/kubelet/pods/8daa1315-5240-47a5-b3aa-d42b61387c39/volumes" Sep 29 15:33:52 crc kubenswrapper[4869]: I0929 15:33:52.028987 4869 generic.go:334] "Generic (PLEG): container finished" podID="2480b819-60ba-41c7-ae50-acbe42716e49" containerID="c3f16e6cc30b88b4f7816bd834da18d2a029ad2df59e36426bfa9c28822bf21c" exitCode=0 Sep 29 15:33:52 crc kubenswrapper[4869]: I0929 15:33:52.029069 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" event={"ID":"2480b819-60ba-41c7-ae50-acbe42716e49","Type":"ContainerDied","Data":"c3f16e6cc30b88b4f7816bd834da18d2a029ad2df59e36426bfa9c28822bf21c"} Sep 29 15:33:52 crc kubenswrapper[4869]: I0929 15:33:52.034963 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerStarted","Data":"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a"} Sep 29 15:33:52 crc kubenswrapper[4869]: I0929 15:33:52.088439 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lxbfv" podStartSLOduration=3.191098468 podStartE2EDuration="9.088415721s" podCreationTimestamp="2025-09-29 15:33:43 +0000 UTC" firstStartedPulling="2025-09-29 15:33:44.896952565 +0000 UTC m=+6751.337596895" lastFinishedPulling="2025-09-29 15:33:50.794269828 +0000 UTC m=+6757.234914148" observedRunningTime="2025-09-29 15:33:52.082107026 +0000 UTC m=+6758.522751346" watchObservedRunningTime="2025-09-29 15:33:52.088415721 +0000 UTC m=+6758.529060041" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.156313 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.193108 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-vzm98"] Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.203753 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-vzm98"] Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.237223 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9xxf\" (UniqueName: \"kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf\") pod \"2480b819-60ba-41c7-ae50-acbe42716e49\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.237712 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host\") pod \"2480b819-60ba-41c7-ae50-acbe42716e49\" (UID: \"2480b819-60ba-41c7-ae50-acbe42716e49\") " Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.237872 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host" (OuterVolumeSpecName: "host") pod "2480b819-60ba-41c7-ae50-acbe42716e49" (UID: "2480b819-60ba-41c7-ae50-acbe42716e49"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.238240 4869 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2480b819-60ba-41c7-ae50-acbe42716e49-host\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.242834 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf" (OuterVolumeSpecName: "kube-api-access-l9xxf") pod "2480b819-60ba-41c7-ae50-acbe42716e49" (UID: "2480b819-60ba-41c7-ae50-acbe42716e49"). InnerVolumeSpecName "kube-api-access-l9xxf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.339946 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9xxf\" (UniqueName: \"kubernetes.io/projected/2480b819-60ba-41c7-ae50-acbe42716e49-kube-api-access-l9xxf\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.650307 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.650356 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.704867 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.835441 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:53 crc kubenswrapper[4869]: I0929 15:33:53.835493 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.056820 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-vzm98" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.056835 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cd30fbd596280149528cbae9c5497b9921aac7cb8294c7aca5bcdf3df338b07" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.114811 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.266931 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2480b819-60ba-41c7-ae50-acbe42716e49" path="/var/lib/kubelet/pods/2480b819-60ba-41c7-ae50-acbe42716e49/volumes" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.401926 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-jqd4f"] Sep 29 15:33:54 crc kubenswrapper[4869]: E0929 15:33:54.402418 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="registry-server" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402444 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="registry-server" Sep 29 15:33:54 crc kubenswrapper[4869]: E0929 15:33:54.402486 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="extract-content" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402497 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="extract-content" Sep 29 15:33:54 crc kubenswrapper[4869]: E0929 15:33:54.402508 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2480b819-60ba-41c7-ae50-acbe42716e49" containerName="container-00" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402515 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="2480b819-60ba-41c7-ae50-acbe42716e49" containerName="container-00" Sep 29 15:33:54 crc kubenswrapper[4869]: E0929 15:33:54.402549 4869 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="extract-utilities" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402557 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="extract-utilities" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402816 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8daa1315-5240-47a5-b3aa-d42b61387c39" containerName="registry-server" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.402854 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="2480b819-60ba-41c7-ae50-acbe42716e49" containerName="container-00" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.403853 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.574340 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.574416 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsf2v\" (UniqueName: \"kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.676547 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.676651 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsf2v\" (UniqueName: \"kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.676792 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.696242 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsf2v\" (UniqueName: \"kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v\") pod \"crc-debug-jqd4f\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.727341 4869 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:54 crc kubenswrapper[4869]: W0929 15:33:54.763902 4869 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64d09ba1_2fbd_45b1_bc9d_2e6cdfabac35.slice/crio-17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3 WatchSource:0}: Error finding container 17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3: Status 404 returned error can't find the container with id 17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3 Sep 29 15:33:54 crc kubenswrapper[4869]: I0929 15:33:54.896364 4869 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lxbfv" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="registry-server" probeResult="failure" output=< Sep 29 15:33:54 crc kubenswrapper[4869]: timeout: failed to connect service ":50051" within 1s Sep 29 15:33:54 crc kubenswrapper[4869]: > Sep 29 15:33:55 crc kubenswrapper[4869]: I0929 15:33:55.075208 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" event={"ID":"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35","Type":"ContainerStarted","Data":"1f7bc29bf5dca01fa05adc11b4ccf2d4c359b0617480f64a08a5732a94bd7b79"} Sep 29 15:33:55 crc kubenswrapper[4869]: I0929 15:33:55.075253 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" event={"ID":"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35","Type":"ContainerStarted","Data":"17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3"} Sep 29 15:33:55 crc kubenswrapper[4869]: I0929 15:33:55.094860 4869 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" podStartSLOduration=1.094833687 podStartE2EDuration="1.094833687s" podCreationTimestamp="2025-09-29 15:33:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 15:33:55.091656544 +0000 UTC m=+6761.532300874" watchObservedRunningTime="2025-09-29 15:33:55.094833687 +0000 UTC m=+6761.535478017" Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.086596 4869 generic.go:334] "Generic (PLEG): container finished" podID="64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" containerID="1f7bc29bf5dca01fa05adc11b4ccf2d4c359b0617480f64a08a5732a94bd7b79" exitCode=0 Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.086668 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" event={"ID":"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35","Type":"ContainerDied","Data":"1f7bc29bf5dca01fa05adc11b4ccf2d4c359b0617480f64a08a5732a94bd7b79"} Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.295876 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.296166 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ktw8x" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="registry-server" containerID="cri-o://9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4" gracePeriod=2 Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.853992 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.940147 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9h2q\" (UniqueName: \"kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q\") pod \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.940425 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content\") pod \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.940917 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities\") pod \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\" (UID: \"e44f255e-26d9-47d5-a313-4c87e6a80ddb\") " Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.944000 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities" (OuterVolumeSpecName: "utilities") pod "e44f255e-26d9-47d5-a313-4c87e6a80ddb" (UID: "e44f255e-26d9-47d5-a313-4c87e6a80ddb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.950340 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q" (OuterVolumeSpecName: "kube-api-access-p9h2q") pod "e44f255e-26d9-47d5-a313-4c87e6a80ddb" (UID: "e44f255e-26d9-47d5-a313-4c87e6a80ddb"). InnerVolumeSpecName "kube-api-access-p9h2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:33:56 crc kubenswrapper[4869]: I0929 15:33:56.972310 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e44f255e-26d9-47d5-a313-4c87e6a80ddb" (UID: "e44f255e-26d9-47d5-a313-4c87e6a80ddb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.044857 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.044897 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9h2q\" (UniqueName: \"kubernetes.io/projected/e44f255e-26d9-47d5-a313-4c87e6a80ddb-kube-api-access-p9h2q\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.044907 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f255e-26d9-47d5-a313-4c87e6a80ddb-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.104020 4869 generic.go:334] "Generic (PLEG): container finished" podID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerID="9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4" exitCode=0 Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.104068 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerDied","Data":"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4"} Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.104127 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktw8x" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.104138 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktw8x" event={"ID":"e44f255e-26d9-47d5-a313-4c87e6a80ddb","Type":"ContainerDied","Data":"75d072ca365e40d6460fcf8cf6031c01162e318e84c580af2dbd4c87047df22e"} Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.104152 4869 scope.go:117] "RemoveContainer" containerID="9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.179019 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.199166 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.200398 4869 scope.go:117] "RemoveContainer" containerID="4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.222569 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktw8x"] Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.233587 4869 scope.go:117] "RemoveContainer" containerID="aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.247375 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsf2v\" (UniqueName: \"kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v\") pod \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.247500 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host\") pod \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\" (UID: \"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35\") " Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.248432 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host" (OuterVolumeSpecName: "host") pod "64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" (UID: "64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.252566 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v" (OuterVolumeSpecName: "kube-api-access-jsf2v") pod "64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" (UID: "64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35"). InnerVolumeSpecName "kube-api-access-jsf2v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.261276 4869 scope.go:117] "RemoveContainer" containerID="9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4" Sep 29 15:33:57 crc kubenswrapper[4869]: E0929 15:33:57.262657 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4\": container with ID starting with 9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4 not found: ID does not exist" containerID="9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.263048 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4"} err="failed to get container status \"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4\": rpc error: code = NotFound desc = could not find container \"9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4\": container with ID starting with 9ed6de80494aeb0aa4052ab15a8b16f4874981fd02325c831830d8edcee750b4 not found: ID does not exist" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.263095 4869 scope.go:117] "RemoveContainer" containerID="4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592" Sep 29 15:33:57 crc kubenswrapper[4869]: E0929 15:33:57.263679 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592\": container with ID starting with 4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592 not found: ID does not exist" containerID="4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.263723 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592"} err="failed to get container status \"4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592\": rpc error: code = NotFound desc = could not find container \"4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592\": container with ID starting with 4ce432306617df41b71feb036e1941337d0c781bc0d4460d484508b4e153c592 not found: ID does not exist" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.263751 4869 scope.go:117] "RemoveContainer" containerID="aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386" Sep 29 15:33:57 crc kubenswrapper[4869]: E0929 15:33:57.264195 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386\": container with ID starting with aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386 not found: ID does not exist" containerID="aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.264228 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386"} err="failed to get container status \"aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386\": rpc error: code = NotFound desc = could not 
find container \"aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386\": container with ID starting with aa452bebad7c2ae134ae2538e7a394fea2416ee376930d12abde8a8dac095386 not found: ID does not exist" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.350999 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsf2v\" (UniqueName: \"kubernetes.io/projected/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-kube-api-access-jsf2v\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:57 crc kubenswrapper[4869]: I0929 15:33:57.351051 4869 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35-host\") on node \"crc\" DevicePath \"\"" Sep 29 15:33:58 crc kubenswrapper[4869]: I0929 15:33:58.115670 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" Sep 29 15:33:58 crc kubenswrapper[4869]: I0929 15:33:58.115679 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-jqd4f" event={"ID":"64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35","Type":"ContainerDied","Data":"17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3"} Sep 29 15:33:58 crc kubenswrapper[4869]: I0929 15:33:58.115807 4869 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17218f0fd261323ba27c8a636e4cac1abf819d8298816b9fdb4d3fe877f1dca3" Sep 29 15:33:58 crc kubenswrapper[4869]: I0929 15:33:58.258312 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" path="/var/lib/kubelet/pods/e44f255e-26d9-47d5-a313-4c87e6a80ddb/volumes" Sep 29 15:33:59 crc kubenswrapper[4869]: I0929 15:33:59.242666 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:33:59 crc kubenswrapper[4869]: E0929 15:33:59.243266 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:34:03 crc kubenswrapper[4869]: I0929 15:34:03.891480 4869 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:34:03 crc kubenswrapper[4869]: I0929 15:34:03.947936 4869 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:34:04 crc kubenswrapper[4869]: I0929 15:34:04.132559 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.188842 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lxbfv" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="registry-server" containerID="cri-o://d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a" gracePeriod=2 Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.531689 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-jqd4f"] Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.547403 4869 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-jqd4f"] Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.786195 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.844342 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4dgc\" (UniqueName: \"kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc\") pod \"8556b1ac-992f-409f-95d7-66299f092c8a\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.844518 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities\") pod \"8556b1ac-992f-409f-95d7-66299f092c8a\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.844596 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content\") pod \"8556b1ac-992f-409f-95d7-66299f092c8a\" (UID: \"8556b1ac-992f-409f-95d7-66299f092c8a\") " Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.845217 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities" (OuterVolumeSpecName: "utilities") pod "8556b1ac-992f-409f-95d7-66299f092c8a" (UID: "8556b1ac-992f-409f-95d7-66299f092c8a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.851482 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc" (OuterVolumeSpecName: "kube-api-access-p4dgc") pod "8556b1ac-992f-409f-95d7-66299f092c8a" (UID: "8556b1ac-992f-409f-95d7-66299f092c8a"). InnerVolumeSpecName "kube-api-access-p4dgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.933737 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8556b1ac-992f-409f-95d7-66299f092c8a" (UID: "8556b1ac-992f-409f-95d7-66299f092c8a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.947499 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4dgc\" (UniqueName: \"kubernetes.io/projected/8556b1ac-992f-409f-95d7-66299f092c8a-kube-api-access-p4dgc\") on node \"crc\" DevicePath \"\"" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.947557 4869 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-utilities\") on node \"crc\" DevicePath \"\"" Sep 29 15:34:05 crc kubenswrapper[4869]: I0929 15:34:05.947577 4869 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8556b1ac-992f-409f-95d7-66299f092c8a-catalog-content\") on node \"crc\" DevicePath \"\"" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.213276 4869 generic.go:334] "Generic (PLEG): container finished" podID="8556b1ac-992f-409f-95d7-66299f092c8a" containerID="d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a" exitCode=0 Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.213338 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerDied","Data":"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a"} Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.213390 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbfv" event={"ID":"8556b1ac-992f-409f-95d7-66299f092c8a","Type":"ContainerDied","Data":"291b07081647c8c3c3c671c4b9fea3da8e16573182d235cf80b472d9b9fdfbf1"} Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.213422 4869 scope.go:117] "RemoveContainer" containerID="d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.214513 4869 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbfv" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.236058 4869 scope.go:117] "RemoveContainer" containerID="0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.264670 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" path="/var/lib/kubelet/pods/64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35/volumes" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.265351 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.267508 4869 scope.go:117] "RemoveContainer" containerID="36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.270604 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lxbfv"] Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.311231 4869 scope.go:117] "RemoveContainer" containerID="d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.311840 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a\": container with ID starting with d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a not found: ID does not exist" containerID="d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.311896 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a"} err="failed to get container status \"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a\": rpc error: code = NotFound desc = could not find container \"d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a\": container with ID starting with d053aee90d49179243ac4a4e84b7b365564a9525142129b0d793f9b2d634959a not found: ID does not exist" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.311932 4869 scope.go:117] "RemoveContainer" containerID="0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.312170 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9\": container with ID starting with 0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9 not found: ID does not exist" containerID="0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.312196 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9"} err="failed to get container status \"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9\": rpc error: code = NotFound desc = could not find container \"0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9\": container with ID starting with 0b60c18a6597d23610b2dd0c1051704ef4f28ac6cfc1f44344ebfe27350754b9 not found: ID does not exist" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 
15:34:06.312214 4869 scope.go:117] "RemoveContainer" containerID="36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.312423 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16\": container with ID starting with 36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16 not found: ID does not exist" containerID="36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.312447 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16"} err="failed to get container status \"36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16\": rpc error: code = NotFound desc = could not find container \"36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16\": container with ID starting with 36cde4b054a1fdfc9673f216016316fd0ed3420dd5027c81c9dccd341cf58b16 not found: ID does not exist" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.761494 4869 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-klf54"] Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762341 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="registry-server" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762363 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="registry-server" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762380 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="extract-utilities" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762388 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="extract-utilities" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762397 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" containerName="container-00" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762403 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" containerName="container-00" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762415 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="extract-content" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762421 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="extract-content" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762436 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="extract-content" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762442 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="extract-content" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762448 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="registry-server" Sep 29 
15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762454 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="registry-server" Sep 29 15:34:06 crc kubenswrapper[4869]: E0929 15:34:06.762466 4869 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="extract-utilities" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762472 4869 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="extract-utilities" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762693 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" containerName="registry-server" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762714 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="64d09ba1-2fbd-45b1-bc9d-2e6cdfabac35" containerName="container-00" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.762725 4869 memory_manager.go:354] "RemoveStaleState removing state" podUID="e44f255e-26d9-47d5-a313-4c87e6a80ddb" containerName="registry-server" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.763477 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.867304 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host\") pod \"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.867558 4869 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlgbf\" (UniqueName: \"kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf\") pod \"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.970450 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host\") pod \"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.970575 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host\") pod \"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.970781 4869 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlgbf\" (UniqueName: \"kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf\") pod \"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:06 crc kubenswrapper[4869]: I0929 15:34:06.999052 4869 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlgbf\" (UniqueName: \"kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf\") pod 
\"crc-debug-klf54\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:07 crc kubenswrapper[4869]: I0929 15:34:07.087905 4869 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:07 crc kubenswrapper[4869]: I0929 15:34:07.229457 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-klf54" event={"ID":"7ccde8d7-211d-4572-8ba2-6dc519624b76","Type":"ContainerStarted","Data":"32e7e54b0ffebf4db81fff0956e47effc6216d2dd98f76ce5f70c517790b97de"} Sep 29 15:34:08 crc kubenswrapper[4869]: I0929 15:34:08.241389 4869 generic.go:334] "Generic (PLEG): container finished" podID="7ccde8d7-211d-4572-8ba2-6dc519624b76" containerID="9bd8f189de9add0ef3c7613183e45a9c4d0095430f1b6da3e584a8a75e12d055" exitCode=0 Sep 29 15:34:08 crc kubenswrapper[4869]: I0929 15:34:08.257137 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8556b1ac-992f-409f-95d7-66299f092c8a" path="/var/lib/kubelet/pods/8556b1ac-992f-409f-95d7-66299f092c8a/volumes" Sep 29 15:34:08 crc kubenswrapper[4869]: I0929 15:34:08.258063 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/crc-debug-klf54" event={"ID":"7ccde8d7-211d-4572-8ba2-6dc519624b76","Type":"ContainerDied","Data":"9bd8f189de9add0ef3c7613183e45a9c4d0095430f1b6da3e584a8a75e12d055"} Sep 29 15:34:08 crc kubenswrapper[4869]: I0929 15:34:08.282054 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-klf54"] Sep 29 15:34:08 crc kubenswrapper[4869]: I0929 15:34:08.293202 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rnl6/crc-debug-klf54"] Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.393567 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.424125 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlgbf\" (UniqueName: \"kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf\") pod \"7ccde8d7-211d-4572-8ba2-6dc519624b76\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.424391 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host\") pod \"7ccde8d7-211d-4572-8ba2-6dc519624b76\" (UID: \"7ccde8d7-211d-4572-8ba2-6dc519624b76\") " Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.424567 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host" (OuterVolumeSpecName: "host") pod "7ccde8d7-211d-4572-8ba2-6dc519624b76" (UID: "7ccde8d7-211d-4572-8ba2-6dc519624b76"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.425129 4869 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7ccde8d7-211d-4572-8ba2-6dc519624b76-host\") on node \"crc\" DevicePath \"\"" Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.430641 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf" (OuterVolumeSpecName: "kube-api-access-wlgbf") pod "7ccde8d7-211d-4572-8ba2-6dc519624b76" (UID: "7ccde8d7-211d-4572-8ba2-6dc519624b76"). InnerVolumeSpecName "kube-api-access-wlgbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Sep 29 15:34:09 crc kubenswrapper[4869]: I0929 15:34:09.527947 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlgbf\" (UniqueName: \"kubernetes.io/projected/7ccde8d7-211d-4572-8ba2-6dc519624b76-kube-api-access-wlgbf\") on node \"crc\" DevicePath \"\"" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.255938 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ccde8d7-211d-4572-8ba2-6dc519624b76" path="/var/lib/kubelet/pods/7ccde8d7-211d-4572-8ba2-6dc519624b76/volumes" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.285552 4869 scope.go:117] "RemoveContainer" containerID="9bd8f189de9add0ef3c7613183e45a9c4d0095430f1b6da3e584a8a75e12d055" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.285652 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/crc-debug-klf54" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.315319 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/util/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.591837 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/pull/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.600259 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/pull/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.605752 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/util/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.877377 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/pull/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.936957 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/util/0.log" Sep 29 15:34:10 crc kubenswrapper[4869]: I0929 15:34:10.944656 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_095701bfb0c114d1670b1812f79cb2adf3a2f7c714cea82a352ac9023dwbxw9_4646821b-b147-4416-bb71-1d722d94a87a/extract/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 
15:34:11.123300 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6495d75b5-qq2wr_6ed6de20-a7c3-40be-bc68-ff4f978dba14/kube-rbac-proxy/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.251800 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748c574d75-shlvx_f719d8af-e87a-4f02-bd10-fe1a2899b71d/kube-rbac-proxy/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.254552 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6495d75b5-qq2wr_6ed6de20-a7c3-40be-bc68-ff4f978dba14/manager/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.477362 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748c574d75-shlvx_f719d8af-e87a-4f02-bd10-fe1a2899b71d/manager/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.547390 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d74f4d695-xxmd5_e2386607-ebad-4616-9e23-d81e2c64350c/manager/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.558312 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d74f4d695-xxmd5_e2386607-ebad-4616-9e23-d81e2c64350c/kube-rbac-proxy/0.log" Sep 29 15:34:11 crc kubenswrapper[4869]: I0929 15:34:11.997869 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-67b5d44b7f-57g9g_d7f3dd6f-b5bd-482e-a41a-e426459a8bfd/kube-rbac-proxy/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.160804 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-67b5d44b7f-57g9g_d7f3dd6f-b5bd-482e-a41a-e426459a8bfd/manager/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.279344 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-8ff95898-h254r_43f73602-1bf6-4550-bad6-ce9cedaa6955/manager/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.289156 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-8ff95898-h254r_43f73602-1bf6-4550-bad6-ce9cedaa6955/kube-rbac-proxy/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.380710 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-695847bc78-578dq_17b6d42c-924b-48c9-9a78-13cb3a8d7776/kube-rbac-proxy/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.500270 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-695847bc78-578dq_17b6d42c-924b-48c9-9a78-13cb3a8d7776/manager/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.624358 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-858cd69f49-lxvwh_b76e3341-e4f0-4711-95cc-874919666585/kube-rbac-proxy/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.893092 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-858cd69f49-lxvwh_b76e3341-e4f0-4711-95cc-874919666585/manager/0.log" Sep 29 15:34:12 crc 
kubenswrapper[4869]: I0929 15:34:12.900711 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9fc8d5567-lsch2_bfc73681-1e32-4e79-818f-944b609ef92b/kube-rbac-proxy/0.log" Sep 29 15:34:12 crc kubenswrapper[4869]: I0929 15:34:12.942998 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9fc8d5567-lsch2_bfc73681-1e32-4e79-818f-944b609ef92b/manager/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.156081 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7bf498966c-z9j4w_c13c38cf-c074-4e90-a79f-58ad4a24db6e/kube-rbac-proxy/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.204296 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7bf498966c-z9j4w_c13c38cf-c074-4e90-a79f-58ad4a24db6e/manager/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.279546 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-56cf9c6b99-2lgbz_7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0/kube-rbac-proxy/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.485048 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-56cf9c6b99-2lgbz_7ddf27f6-9984-44ee-9d1d-3aa6e38b2af0/manager/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.500316 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-687b9cf756-l57c7_8667518f-048c-48b3-b838-3ff38cbc76b7/kube-rbac-proxy/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.560917 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-687b9cf756-l57c7_8667518f-048c-48b3-b838-3ff38cbc76b7/manager/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.729721 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-54d766c9f9-ggvvf_6f08d81a-137d-43b2-8c78-8227d4cd848c/kube-rbac-proxy/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.742126 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-54d766c9f9-ggvvf_6f08d81a-137d-43b2-8c78-8227d4cd848c/manager/0.log" Sep 29 15:34:13 crc kubenswrapper[4869]: I0929 15:34:13.972299 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-c7c776c96-v9wbw_2a0fb7a7-5469-4513-ac6f-3ce8f28b9310/kube-rbac-proxy/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.116513 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-c7c776c96-v9wbw_2a0fb7a7-5469-4513-ac6f-3ce8f28b9310/manager/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.212885 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-76fcc6dc7c-dlvlw_ad5406a4-1938-49ee-87ef-cae347abba83/manager/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.240285 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-76fcc6dc7c-dlvlw_ad5406a4-1938-49ee-87ef-cae347abba83/kube-rbac-proxy/0.log" 
Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.252135 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc" Sep 29 15:34:14 crc kubenswrapper[4869]: E0929 15:34:14.252881 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.338086 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6d776955-jq4lw_cc44f555-b327-4135-a59c-5c085be0ca2e/kube-rbac-proxy/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.417648 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6d776955-jq4lw_cc44f555-b327-4135-a59c-5c085be0ca2e/manager/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.543938 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-cdbfd4cbb-hbh55_a923032e-f8f0-4622-8de9-01973ec22782/kube-rbac-proxy/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.679413 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d6679759-s65sr_177c83a1-b737-4c5e-b046-260e28bb6e4e/kube-rbac-proxy/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.915056 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-rpv4c_fe014b15-35d6-4091-aece-db0abc23b4a9/registry-server/0.log" Sep 29 15:34:14 crc kubenswrapper[4869]: I0929 15:34:14.967829 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d6679759-s65sr_177c83a1-b737-4c5e-b046-260e28bb6e4e/operator/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.092418 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5f95c46c78-5wk7g_6f94106a-7f34-4265-a988-c90ac7466919/kube-rbac-proxy/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.276375 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5f95c46c78-5wk7g_6f94106a-7f34-4265-a988-c90ac7466919/manager/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.282755 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-774b97b48-8bn6g_2716f7dc-4fa9-46a8-abcb-71016098e732/kube-rbac-proxy/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.375091 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-774b97b48-8bn6g_2716f7dc-4fa9-46a8-abcb-71016098e732/manager/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.514533 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-79d8469568-j4gqm_e73fced4-cf05-4c8d-b1af-39c07ef69514/operator/0.log" Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.677815 
4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bc7dc7bd9-59sjm_bc76592b-e1dc-41f2-8696-8edd7e3d8315/kube-rbac-proxy/0.log"
Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.761673 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bc7dc7bd9-59sjm_bc76592b-e1dc-41f2-8696-8edd7e3d8315/manager/0.log"
Sep 29 15:34:15 crc kubenswrapper[4869]: I0929 15:34:15.874311 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5bf96cfbc4-rhc9x_436fe0ad-2010-46f2-ad86-ee243370d675/kube-rbac-proxy/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.128243 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-f66b554c6-xmgtg_9eb7b21e-54fc-4b89-977a-6ad6481e7237/manager/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.185550 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-f66b554c6-xmgtg_9eb7b21e-54fc-4b89-977a-6ad6481e7237/kube-rbac-proxy/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.362001 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-cdbfd4cbb-hbh55_a923032e-f8f0-4622-8de9-01973ec22782/manager/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.413203 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5bf96cfbc4-rhc9x_436fe0ad-2010-46f2-ad86-ee243370d675/manager/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.471859 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5f5b8d96d6-ppscx_aa4f1345-cc28-491e-876c-a125e73b4a8b/manager/0.log"
Sep 29 15:34:16 crc kubenswrapper[4869]: I0929 15:34:16.480965 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5f5b8d96d6-ppscx_aa4f1345-cc28-491e-876c-a125e73b4a8b/kube-rbac-proxy/0.log"
Sep 29 15:34:29 crc kubenswrapper[4869]: I0929 15:34:29.242895 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:34:29 crc kubenswrapper[4869]: E0929 15:34:29.244131 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:34:33 crc kubenswrapper[4869]: I0929 15:34:33.398320 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-bzpd6_b5d3f667-2064-442a-a0a7-899f35a00d9f/control-plane-machine-set-operator/0.log"
Sep 29 15:34:33 crc kubenswrapper[4869]: I0929 15:34:33.639896 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-k2ml4_ad6570a1-7f62-46b4-87e8-6ddea76c4101/kube-rbac-proxy/0.log"
Sep 29 15:34:33 crc kubenswrapper[4869]: I0929 15:34:33.640127 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-k2ml4_ad6570a1-7f62-46b4-87e8-6ddea76c4101/machine-api-operator/0.log"
Sep 29 15:34:40 crc kubenswrapper[4869]: I0929 15:34:40.242527 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:34:40 crc kubenswrapper[4869]: E0929 15:34:40.243371 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:34:45 crc kubenswrapper[4869]: I0929 15:34:45.941560 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-mbrxl_d49de4d2-6830-45ee-adf5-57fa0cfc58ec/cert-manager-controller/0.log"
Sep 29 15:34:46 crc kubenswrapper[4869]: I0929 15:34:46.180060 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mbljb_bba4c7ae-ab90-4f36-b529-0fb96008204c/cert-manager-cainjector/0.log"
Sep 29 15:34:46 crc kubenswrapper[4869]: I0929 15:34:46.207187 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-l5274_a1343538-595b-4e8b-9c3a-dbb5abd2607d/cert-manager-webhook/0.log"
Sep 29 15:34:54 crc kubenswrapper[4869]: I0929 15:34:54.249251 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:34:54 crc kubenswrapper[4869]: E0929 15:34:54.250145 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.161739 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-864bb6dfb5-874wj_9f2bacad-6ff8-42ec-ad98-41fdf32e6eaa/nmstate-console-plugin/0.log"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.279259 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-zckq9_f7cec7ed-cfcf-4e48-8146-259ffff9cebf/nmstate-handler/0.log"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.371773 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58fcddf996-xzscn_4cc9f3a9-a79b-4734-906b-fe99f71ff3ca/kube-rbac-proxy/0.log"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.416392 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58fcddf996-xzscn_4cc9f3a9-a79b-4734-906b-fe99f71ff3ca/nmstate-metrics/0.log"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.587543 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5d6f6cfd66-klk2d_10bcf6b2-b103-4427-a177-b2123f0a942e/nmstate-operator/0.log"
Sep 29 15:34:58 crc kubenswrapper[4869]: I0929 15:34:58.639709 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6d689559c5-lrbmp_785a50f5-f92b-4774-a576-66b1f85cdbab/nmstate-webhook/0.log"
Sep 29 15:35:05 crc kubenswrapper[4869]: I0929 15:35:05.242600 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:35:05 crc kubenswrapper[4869]: E0929 15:35:05.243252 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:35:12 crc kubenswrapper[4869]: I0929 15:35:12.609128 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5d688f5ffc-6lrfv_c178702e-60a4-430d-b3e3-58e59663407e/kube-rbac-proxy/0.log"
Sep 29 15:35:12 crc kubenswrapper[4869]: I0929 15:35:12.748399 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5d688f5ffc-6lrfv_c178702e-60a4-430d-b3e3-58e59663407e/controller/0.log"
Sep 29 15:35:12 crc kubenswrapper[4869]: I0929 15:35:12.868140 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-frr-files/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.103366 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-reloader/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.110890 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-frr-files/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.162978 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-metrics/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.172529 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-reloader/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.368447 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-reloader/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.379974 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-frr-files/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.399820 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-metrics/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.433434 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-metrics/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.573170 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-metrics/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.595796 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-reloader/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.607094 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/cp-frr-files/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.648238 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/controller/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.804911 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/frr-metrics/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.819020 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/kube-rbac-proxy/0.log"
Sep 29 15:35:13 crc kubenswrapper[4869]: I0929 15:35:13.885709 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/kube-rbac-proxy-frr/0.log"
Sep 29 15:35:14 crc kubenswrapper[4869]: I0929 15:35:14.137313 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/reloader/0.log"
Sep 29 15:35:14 crc kubenswrapper[4869]: I0929 15:35:14.161955 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-5478bdb765-6fj4l_37e44d72-369b-4c65-90f3-73b692729b60/frr-k8s-webhook-server/0.log"
Sep 29 15:35:14 crc kubenswrapper[4869]: I0929 15:35:14.408711 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6966c7c87b-ck8s9_a4c44dd4-f417-4b2b-9c69-14ec65edd173/manager/0.log"
Sep 29 15:35:14 crc kubenswrapper[4869]: I0929 15:35:14.683634 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7f65f6b848-6v8dg_54cee50d-edae-4d32-99e2-83b8f8e5b99c/webhook-server/0.log"
Sep 29 15:35:14 crc kubenswrapper[4869]: I0929 15:35:14.780411 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xtbwg_26a588ca-3edb-42f4-b436-eccd007e8cbc/kube-rbac-proxy/0.log"
Sep 29 15:35:15 crc kubenswrapper[4869]: I0929 15:35:15.650439 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xtbwg_26a588ca-3edb-42f4-b436-eccd007e8cbc/speaker/0.log"
Sep 29 15:35:16 crc kubenswrapper[4869]: I0929 15:35:16.028880 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-psh4w_1e5457a7-a8ac-4d17-aa3d-b01bbb10fe01/frr/0.log"
Sep 29 15:35:18 crc kubenswrapper[4869]: I0929 15:35:18.242346 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:35:18 crc kubenswrapper[4869]: E0929 15:35:18.243031 4869 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-mrhp2_openshift-machine-config-operator(c2cb4b77-d447-4866-ac1e-eb4f0b4babae)\"" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae"
Sep 29 15:35:28 crc kubenswrapper[4869]: I0929 15:35:28.747659 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/util/0.log"
Sep 29 15:35:28 crc kubenswrapper[4869]: I0929 15:35:28.972470 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/util/0.log"
Sep 29 15:35:28 crc kubenswrapper[4869]: I0929 15:35:28.989747 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/pull/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.048670 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/pull/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.234519 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/util/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.292437 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/pull/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.302567 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9a6e092ce660f08e14c0b0ceab3711fa43f2b70244f9df8a7a069040bcwjcj9_d5ba66e2-b35f-49f1-81aa-5b3007724d39/extract/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.469081 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/util/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.647694 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/util/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.683200 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/pull/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.726151 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/pull/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.867628 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/util/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.907334 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/extract/0.log"
Sep 29 15:35:29 crc kubenswrapper[4869]: I0929 15:35:29.951559 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2d4g6vz_64a7ea48-cd6e-4103-bb96-ab537c59c710/pull/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.067166 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-utilities/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.242579 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.321168 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-utilities/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.341844 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-content/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.377765 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-content/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.860060 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-content/0.log"
Sep 29 15:35:30 crc kubenswrapper[4869]: I0929 15:35:30.913677 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/extract-utilities/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.148701 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-utilities/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.172762 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"b64610902fca8b0da7258ca31b622b5715bd01ab8003e566e5881267a6ca7554"}
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.262485 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-64xnh_bdf41451-b799-4eea-a0fb-2804371471b7/registry-server/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.415769 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-utilities/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.457176 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-content/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.462141 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-content/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.627442 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-utilities/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.664390 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/extract-content/0.log"
Sep 29 15:35:31 crc kubenswrapper[4869]: I0929 15:35:31.947187 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/util/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.206990 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/util/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.268655 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/pull/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.305938 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/pull/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.498443 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/pull/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.521104 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/util/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.564589 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f29efc416ca216184f30dbb4b19e0f463bdcecc8ef634322abbad88d96sxmht_4233fe38-bc5e-4f0e-a692-a14ef606c9ca/extract/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.773733 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-tssgg_22f29fa0-8b84-4865-b786-53dc0a324c3b/marketplace-operator/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.807577 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-utilities/0.log"
Sep 29 15:35:32 crc kubenswrapper[4869]: I0929 15:35:32.930358 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-7797k_0b98faa6-4adb-4767-adb6-504b9a6e2eb7/registry-server/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.085764 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-content/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.088839 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-content/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.101363 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-utilities/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.341117 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-utilities/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.349024 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/extract-content/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.441750 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-utilities/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.645708 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zptkp_c6d1c827-7199-4858-a9ae-1515c38a2e57/registry-server/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.669158 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-content/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.687086 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-utilities/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.692942 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-content/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.945171 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-utilities/0.log"
Sep 29 15:35:33 crc kubenswrapper[4869]: I0929 15:35:33.950507 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/extract-content/0.log"
Sep 29 15:35:34 crc kubenswrapper[4869]: I0929 15:35:34.791197 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5t5v2_2b1e4d3b-67b6-473f-bda1-b2fd0f253f97/registry-server/0.log"
Sep 29 15:35:48 crc kubenswrapper[4869]: I0929 15:35:48.063015 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-7c8cf85677-7gl6v_bfd220db-47be-491b-bfe7-5962c10c099b/prometheus-operator/0.log"
Sep 29 15:35:48 crc kubenswrapper[4869]: I0929 15:35:48.300579 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b549cd94f-v7k9t_81b26c9f-a443-43c7-bebe-4f5d9b5261b5/prometheus-operator-admission-webhook/0.log"
Sep 29 15:35:48 crc kubenswrapper[4869]: I0929 15:35:48.423279 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b549cd94f-zdpft_5d741f06-f7fa-4727-bf0b-4047c52949de/prometheus-operator-admission-webhook/0.log"
Sep 29 15:35:48 crc kubenswrapper[4869]: I0929 15:35:48.549473 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-cc5f78dfc-527r2_6edcb3e2-2dbc-4d9c-a762-13d7cf4fdcad/operator/0.log"
Sep 29 15:35:48 crc kubenswrapper[4869]: I0929 15:35:48.645762 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-54bc95c9fb-n98tr_553217bb-4f98-4170-94e2-c809a866c927/perses-operator/0.log"
Sep 29 15:36:12 crc kubenswrapper[4869]: E0929 15:36:12.667984 4869 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:48188->38.102.83.80:45233: write tcp 38.102.83.80:48188->38.102.83.80:45233: write: broken pipe
Sep 29 15:37:27 crc kubenswrapper[4869]: I0929 15:37:27.746907 4869 scope.go:117] "RemoveContainer" containerID="c3f16e6cc30b88b4f7816bd834da18d2a029ad2df59e36426bfa9c28822bf21c"
Sep 29 15:37:50 crc kubenswrapper[4869]: I0929 15:37:50.657523 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:37:50 crc kubenswrapper[4869]: I0929 15:37:50.658076 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:38:20 crc kubenswrapper[4869]: I0929 15:38:20.657928 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:38:20 crc kubenswrapper[4869]: I0929 15:38:20.658502 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:38:49 crc kubenswrapper[4869]: I0929 15:38:49.446641 4869 generic.go:334] "Generic (PLEG): container finished" podID="261b922e-a850-4402-b06c-c40aa26e5a67" containerID="cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d" exitCode=0
Sep 29 15:38:49 crc kubenswrapper[4869]: I0929 15:38:49.446696 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6rnl6/must-gather-n27l6" event={"ID":"261b922e-a850-4402-b06c-c40aa26e5a67","Type":"ContainerDied","Data":"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"}
Sep 29 15:38:49 crc kubenswrapper[4869]: I0929 15:38:49.448988 4869 scope.go:117] "RemoveContainer" containerID="cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.257508 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rnl6_must-gather-n27l6_261b922e-a850-4402-b06c-c40aa26e5a67/gather/0.log"
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.669531 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.669644 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.669708 4869 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2"
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.670801 4869 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b64610902fca8b0da7258ca31b622b5715bd01ab8003e566e5881267a6ca7554"} pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Sep 29 15:38:50 crc kubenswrapper[4869]: I0929 15:38:50.670874 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" containerID="cri-o://b64610902fca8b0da7258ca31b622b5715bd01ab8003e566e5881267a6ca7554" gracePeriod=600
Sep 29 15:38:51 crc kubenswrapper[4869]: I0929 15:38:51.471411 4869 generic.go:334] "Generic (PLEG): container finished" podID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerID="b64610902fca8b0da7258ca31b622b5715bd01ab8003e566e5881267a6ca7554" exitCode=0
Sep 29 15:38:51 crc kubenswrapper[4869]: I0929 15:38:51.471479 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerDied","Data":"b64610902fca8b0da7258ca31b622b5715bd01ab8003e566e5881267a6ca7554"}
Sep 29 15:38:51 crc kubenswrapper[4869]: I0929 15:38:51.471737 4869 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" event={"ID":"c2cb4b77-d447-4866-ac1e-eb4f0b4babae","Type":"ContainerStarted","Data":"7e6fe6d09dca68a6aec0cf6db30a2089a0145218709bdc6975a23321697d02bb"}
Sep 29 15:38:51 crc kubenswrapper[4869]: I0929 15:38:51.471764 4869 scope.go:117] "RemoveContainer" containerID="98226534f4b19575a5cc1b77d47f89a6bc2ee69233afe293bf5e1d575e9720cc"
Sep 29 15:38:59 crc kubenswrapper[4869]: I0929 15:38:59.479237 4869 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6rnl6/must-gather-n27l6"]
Sep 29 15:38:59 crc kubenswrapper[4869]: I0929 15:38:59.480289 4869 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6rnl6/must-gather-n27l6" podUID="261b922e-a850-4402-b06c-c40aa26e5a67" containerName="copy" containerID="cri-o://501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69" gracePeriod=2
Sep 29 15:38:59 crc kubenswrapper[4869]: I0929 15:38:59.495458 4869 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6rnl6/must-gather-n27l6"]
Sep 29 15:38:59 crc kubenswrapper[4869]: I0929 15:38:59.962285 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rnl6_must-gather-n27l6_261b922e-a850-4402-b06c-c40aa26e5a67/copy/0.log"
Sep 29 15:38:59 crc kubenswrapper[4869]: I0929 15:38:59.963251 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/must-gather-n27l6"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.105953 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output\") pod \"261b922e-a850-4402-b06c-c40aa26e5a67\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") "
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.106108 4869 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdmhz\" (UniqueName: \"kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz\") pod \"261b922e-a850-4402-b06c-c40aa26e5a67\" (UID: \"261b922e-a850-4402-b06c-c40aa26e5a67\") "
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.113249 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz" (OuterVolumeSpecName: "kube-api-access-tdmhz") pod "261b922e-a850-4402-b06c-c40aa26e5a67" (UID: "261b922e-a850-4402-b06c-c40aa26e5a67"). InnerVolumeSpecName "kube-api-access-tdmhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.210131 4869 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdmhz\" (UniqueName: \"kubernetes.io/projected/261b922e-a850-4402-b06c-c40aa26e5a67-kube-api-access-tdmhz\") on node \"crc\" DevicePath \"\""
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.496224 4869 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "261b922e-a850-4402-b06c-c40aa26e5a67" (UID: "261b922e-a850-4402-b06c-c40aa26e5a67"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.543914 4869 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/261b922e-a850-4402-b06c-c40aa26e5a67-must-gather-output\") on node \"crc\" DevicePath \"\""
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.574462 4869 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6rnl6_must-gather-n27l6_261b922e-a850-4402-b06c-c40aa26e5a67/copy/0.log"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.574889 4869 generic.go:334] "Generic (PLEG): container finished" podID="261b922e-a850-4402-b06c-c40aa26e5a67" containerID="501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69" exitCode=143
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.574956 4869 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6rnl6/must-gather-n27l6"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.574979 4869 scope.go:117] "RemoveContainer" containerID="501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.606878 4869 scope.go:117] "RemoveContainer" containerID="cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.683003 4869 scope.go:117] "RemoveContainer" containerID="501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69"
Sep 29 15:39:00 crc kubenswrapper[4869]: E0929 15:39:00.683989 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69\": container with ID starting with 501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69 not found: ID does not exist" containerID="501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.684039 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69"} err="failed to get container status \"501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69\": rpc error: code = NotFound desc = could not find container \"501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69\": container with ID starting with 501897de6c74990def25f0d43096524bafc37b9f303134471ebec6ec4d54de69 not found: ID does not exist"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.684075 4869 scope.go:117] "RemoveContainer" containerID="cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"
Sep 29 15:39:00 crc kubenswrapper[4869]: E0929 15:39:00.685540 4869 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d\": container with ID starting with cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d not found: ID does not exist" containerID="cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"
Sep 29 15:39:00 crc kubenswrapper[4869]: I0929 15:39:00.685585 4869 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d"} err="failed to get container status \"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d\": rpc error: code = NotFound desc = could not find container \"cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d\": container with ID starting with cfbb493d95a0283ee186185833d51be30b3bd3342676501dd5eda25f1b867b3d not found: ID does not exist"
Sep 29 15:39:02 crc kubenswrapper[4869]: I0929 15:39:02.254194 4869 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="261b922e-a850-4402-b06c-c40aa26e5a67" path="/var/lib/kubelet/pods/261b922e-a850-4402-b06c-c40aa26e5a67/volumes"
Sep 29 15:40:27 crc kubenswrapper[4869]: I0929 15:40:27.880306 4869 scope.go:117] "RemoveContainer" containerID="1f7bc29bf5dca01fa05adc11b4ccf2d4c359b0617480f64a08a5732a94bd7b79"
Sep 29 15:41:20 crc kubenswrapper[4869]: I0929 15:41:20.657387 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:41:20 crc kubenswrapper[4869]: I0929 15:41:20.657920 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Sep 29 15:41:50 crc kubenswrapper[4869]: I0929 15:41:50.657698 4869 patch_prober.go:28] interesting pod/machine-config-daemon-mrhp2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Sep 29 15:41:50 crc kubenswrapper[4869]: I0929 15:41:50.658344 4869 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-mrhp2" podUID="c2cb4b77-d447-4866-ac1e-eb4f0b4babae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515066524342024454 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015066524342017371 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015066505605016515 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015066505606015466 5ustar corecore